Example 1
def test_reload_prod(capfd: Capture, faker: Faker) -> None:
    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="angular",
    )

    init_project(capfd, " --prod ", "--force")

    start_registry(capfd)
    pull_images(capfd)

    start_project(capfd)

    time.sleep(5)

    exec_command(capfd, "reload backend", "Reloading gunicorn (PID #")

    exec_command(
        capfd,
        "reload",
        "Can't reload the frontend if not explicitly requested",
        "Services reloaded",
    )

    docker = Docker()
    container = docker.get_container("frontend")
    assert container is not None

    docker.client.container.stop(container[0])
    exec_command(capfd, "reload frontend", "Reloading frontend...")

    container = docker.get_container("frontend")

    if Configuration.swarm_mode:
        # frontend reload is always executed in compose mode
        # => the container retrieved from docker.get_container in swarm mode is None
        assert container is None
        # Let's retrieve the container name in compose mode:

        Configuration.swarm_mode = False
        docker = Docker()
        container = docker.get_container("frontend")

        # Let's restore the docker client
        Configuration.swarm_mode = True
        docker = Docker()

    assert container is not None

    docker.client.container.remove(container[0], force=True)
    exec_command(capfd, "reload frontend", "Reloading frontend...")

    exec_command(
        capfd,
        "reload frontend backend",
        "Can't reload frontend and other services at once",
    )
    exec_command(capfd, "remove", "Stack removed")
Example 2
def test_reload_dev(capfd: Capture, faker: Faker) -> None:
    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    pull_images(capfd)

    start_project(capfd)

    time.sleep(5)

    # For each support service verify:
    #   1) a start line in the logs
    #   2) the container is not re-created after the command
    #   3) the start line in the logs is printed again
    #   4) some deeper check based on the service?
    #      For example, is the API reloading a code change?
    exec_command(capfd, "reload backend", "Reloading Flask...")

    exec_command(capfd, "remove", "Stack removed")

    if Configuration.swarm_mode:
        exec_command(capfd, "remove registry", "Service registry removed")
Example 3
def test_cronjobs(capfd: Capture, faker: Faker) -> None:

    project = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project,
        auth="postgres",
        frontend="no",
    )
    init_project(capfd, "-e CRONTAB_ENABLE=1")
    start_registry(capfd)
    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")

    exec_command(
        capfd,
        "logs --tail 50 backend",
        # Logs are not prefixed because only one service is shown
        "Found no cronjob to be enabled, skipping crontab setup",
        "Testing mode",
    )

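    # Write a minimal cronjob (it runs every minute and appends to a log file)
    # so that the next start with CRONTAB_ENABLE=1 installs it via crontab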
    with open(f"projects/{project}/backend/cron/hello-world.cron", "w+") as f:
        f.write("* * * * * echo 'Hello world' >> /var/log/cron.log 2>&1\n")
        f.write("\n")

    exec_command(
        capfd,
        "-e CRONTAB_ENABLE=1 start --force",
        "Stack started",
    )

    if Configuration.swarm_mode:
        time.sleep(10)

    exec_command(
        capfd,
        "logs --tail 50 backend",
        # Logs are not prefixed because only one service is shown
        # "Testing mode",
        "Enabling cron...",
        "Cron enabled",
        # This is the output of crontab -l, which verifies the cronjob installation
        "* * * * * echo 'Hello world'",
    )
Example 4
def test_all(capfd: Capture) -> None:

    exec_command(capfd, "restart", "This command is no longer available")

    create_project(
        capfd=capfd,
        name="first",
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)
    pull_images(capfd)
    start_project(capfd)

    start_date1 = get_container_start_date(capfd, "backend")
    exec_command(
        capfd,
        "start",
        "Stack started",
    )

    start_date2 = get_container_start_date(capfd, "backend")

    # The service is not restarted because its definition is unchanged
    assert start_date1 == start_date2

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "remove backend",
            "first_backend scaled to 0",
            "verify: Service converged",
            "Services removed",
        )

    exec_command(
        capfd,
        "start --force",
        "Stack started",
    )

    start_date3 = get_container_start_date(capfd, "backend")

    assert start_date2 != start_date3
Example 5
def test_password(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "password")

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="postgres",
        frontend="no",
    )

    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "password backend",
        "Please specify one between --random and --password options",
    )
Example 6
def test_remove(capfd: Capture) -> None:

    execute_outside(capfd, "remove")

    create_project(
        capfd=capfd,
        name="rem",
        auth="postgres",
        frontend="no",
    )
    init_project(capfd, " -e HEALTHCHECK_INTERVAL=20s ")

    start_registry(capfd)

    pull_images(capfd)

    if Configuration.swarm_mode:
        # In swarm mode removing a single service is not allowed if nothing is running
        exec_command(
            capfd,
            "remove postgres",
            f"Stack rem is not running, deploy it with {colors.RED}rapydo start",
        )

    # Even if nothing is running, remove is permitted on both Compose and Swarm
    exec_command(capfd, "remove", "Stack removed")

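    # Expected container names: underscores in swarm mode, hyphens in compose mode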
    NONE: List[str] = []
    if Configuration.swarm_mode:
        BACKEND_ONLY = ["rem_backend"]
        ALL = ["rem_backend", "rem_postgres"]
    else:
        BACKEND_ONLY = ["rem-backend"]
        ALL = ["rem-backend", "rem-postgres"]

    assert get_containers() == NONE

    start_project(capfd)

    if Configuration.swarm_mode:
        NETWORK_NAME = "rem_swarm_default"
    else:
        NETWORK_NAME = "rem_compose_default"

    assert get_containers() == ALL

    NAMED_VOLUMES_NUM, UNNAMED_VOLUMES_NUM = count_volumes()

    if Configuration.swarm_mode:
        # In swarm mode removing a single service is equivalent to scaling it to 0
        exec_command(
            capfd,
            "remove postgres",
            "rem_postgres scaled to 0",
            "verify: Service converged",
            "Services removed",
        )

        assert get_containers() == BACKEND_ONLY
        # Removing a single service does not remove the network
        assert NETWORK_NAME in get_networks()
        # Removing a single service also removes unnamed volumes
        time.sleep(2)
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        assert UNNAMED_VOLUMES_NUM > u

        exec_command(
            capfd,
            "start",
            "Stack started",
        )

        time.sleep(2)

        assert get_containers() == ALL

        NAMED_VOLUMES_NUM, UNNAMED_VOLUMES_NUM = count_volumes()

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        assert get_containers() == NONE
        # Removal of all services also drops the network
        assert NETWORK_NAME not in get_networks()
        # Removal of all services also removes unnamed volumes
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        assert UNNAMED_VOLUMES_NUM > u
    else:

        exec_command(
            capfd,
            "remove postgres",
            "Stack removed",
        )

        assert get_containers() == BACKEND_ONLY
        # Removing a single service does not remove the network
        assert NETWORK_NAME in get_networks()
        # Removing a single service does not remove any volume
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        assert UNNAMED_VOLUMES_NUM == u

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        assert get_containers() == NONE
        # Removal of all services should also drop the network
        # assert NETWORK_NAME not in get_networks()

        # Networks are not removed, even if docker compose down --help says they should
        # be. Also, docker-compose down removes the network, as far as I remember.
        # Should this be reported as a bug? If fixed, this check will start to fail
        assert NETWORK_NAME in get_networks()

        # Removal of all services does not remove any volume
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        assert UNNAMED_VOLUMES_NUM == u

        start_project(capfd)

        assert get_containers() == ALL

        exec_command(
            capfd,
            "remove --all postgres",
            "Stack removed",
        )

        assert get_containers() == BACKEND_ONLY
        # Removing with the --all flag also removes unnamed volumes
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        # This works locally... but not on GitHub Actions... mystery
        # assert UNNAMED_VOLUMES_NUM > u

        # New counts, after the single-service --all removed some unnamed volumes
        NAMED_VOLUMES_NUM, UNNAMED_VOLUMES_NUM = count_volumes()

        exec_command(capfd, "remove --all", "Stack removed")

        assert get_containers() == NONE
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM > n
        assert UNNAMED_VOLUMES_NUM > u

    if Configuration.swarm_mode:
        # Remove the registry
        exec_command(
            capfd,
            "remove registry",
            "Service registry removed",
        )

        # Verify that the registry is no longer running
        exec_command(
            capfd,
            "start",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        exec_command(
            capfd,
            "remove registry",
            "Service registry is not running",
        )

        # Mix both registry and normal services
        exec_command(
            capfd,
            "remove registry postgres",
            # Registry is already removed, can't remove it again
            # But this is enough to confirm that registry and services can be mixed up
            "Service registry is not running",
            # The main stack is already removed, can't remove postgres
            # But this is enough to confirm that registry and services can be mixed up
            "Stack rem is not running, deploy it with",
        )

        start_registry(capfd)

    exec_command(
        capfd,
        "run --detach --pull --port 7777 adminer",
        "You can access Adminer interface",
    )
    exec_command(
        capfd,
        "run --detach --pull --port 8888 swaggerui",
        "You can access SwaggerUI web page",
    )

    exec_command(
        capfd,
        "remove adminer postgres swaggerui",
        "Service adminer removed",
        "Service swaggerui removed",
    )

    exec_command(
        capfd,
        "remove adminer postgres swaggerui",
        "Service adminer is not running",
        "Service swaggerui is not running",
    )

    assert get_containers() == NONE
    # Verify that removal of interfaces does not stop the main stack, if not requested
    exec_command(capfd, "start backend", "Stack started")
    time.sleep(2)
    assert get_containers() == BACKEND_ONLY
    exec_command(capfd, "remove adminer", "Service adminer is not running")
    assert get_containers() == BACKEND_ONLY

    exec_command(capfd, "remove", "Stack removed")
Example 7
def test_debug_run(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "run backend")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
    )
    init_project(capfd)

    start_registry(capfd)

    exec_command(
        capfd,
        "volatile backend",
        "Volatile command is replaced by rapydo run --debug backend",
    )

    img = f"rapydo/backend:{__version__}"
    exec_command(
        capfd,
        "run --debug backend",
        f"Missing {img} image, add {colors.RED}--pull{colors.RESET} option",
    )

    pull_images(capfd)
    # start_project(capfd)

    # exec_command(
    #     capfd,
    #     "run --debug backend --command hostname",
    #     "Bind for 0.0.0.0:8080 failed: port is already allocated",
    # )

    # exec_command(
    #     capfd,
    #     "remove",
    #     "Stack removed",
    # )

    exec_command(
        capfd,
        "run backend --command hostname",
        "Can't specify a command if debug mode is OFF",
    )

    exec_command(
        capfd,
        "run backend --command hostname --user developer",
        "Can't specify a user if debug mode is OFF",
    )

    exec_command(
        capfd,
        "run --debug backend --command hostname",
        "backend-server",
    )

    exec_command(
        capfd,
        "run --debug backend --command whoami",
        "root",
    )

    exec_command(
        capfd,
        "run --debug backend -u developer --command whoami",
        "Please remember that users in volatile containers are not mapped on current ",
        "developer",
    )

    exec_command(
        capfd,
        "run --debug backend -u invalid --command whoami",
        "Error response from daemon:",
        "unable to find user invalid:",
        "no matching entries in passwd file",
    )
Example 8
def test_all(capfd: Capture) -> None:

    execute_outside(capfd, "status")

    create_project(
        capfd=capfd,
        name="first",
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)
    pull_images(capfd)

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "status",
            "Manager",
            "Ready+Active",
            "No service is running",
        )
    else:
        exec_command(
            capfd,
            "status",
            "No container is running",
        )

    start_project(capfd)

    if Configuration.swarm_mode:

        exec_command(
            capfd,
            "status",
            "Manager",
            "Ready+Active",
            "first_backend",
            "first_postgres",
            " [1]",
            # No longer found starting because
            # HEALTHCHECK_INTERVAL is defaulted to 1s during tests
            # "starting",
            "running",
        )

        init_project(capfd, "", "--force")

        exec_command(
            capfd,
            "start --force",
            "Stack started",
        )

        time.sleep(4)

        exec_command(
            capfd,
            "status",
            "running",
        )

        exec_command(
            capfd,
            "status backend",
            "running",
        )

        exec_command(
            capfd,
            "status backend postgres",
            "running",
        )

    else:
        exec_command(
            capfd,
            "status",
            "first-backend-1",
        )

        exec_command(
            capfd,
            "status backend",
            "first-backend-1",
        )

        exec_command(
            capfd,
            "status backend postgres",
            "first-backend-1",
        )
Example 9
def test_interfaces(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "run adminer")

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)

    start_registry(capfd)

    exec_command(
        capfd,
        "interfaces sqlalchemy",
        "Deprecated interface sqlalchemy, use adminer instead",
    )

    exec_command(
        capfd,
        "interfaces adminer",
        "Interfaces command is replaced by rapydo run adminer",
    )

    exec_command(
        capfd,
        "run invalid",
        "Services misconfiguration, can't find invalid",
    )

    exec_command(
        capfd,
        "run adminer --port XYZ",
        "Invalid value for '--port' / '-p': 'XYZ' is not a valid integer",
    )

    img = f"rapydo/adminer:{__version__}"
    exec_command(
        capfd,
        "run adminer",
        f"Missing {img} image, add {colors.RED}--pull{colors.RESET} option",
    )

    # Launch Adminer UI with default port
    exec_command(
        capfd,
        "run adminer --pull --detach",
        "Pulling image for adminer...",
        # f"Creating {project_name}_adminer_run",
        "You can access Adminer interface on: http://localhost:7777",
    )

    # Adminer or SwaggerUI does not start? You can debug with:
    # from python_on_whales import docker
    # assert docker.logs("adminer", tail=10) == "debug"

    exec_command(
        capfd,
        "remove adminer",
        "Service adminer removed",
    )

    # Launch Adminer UI with custom port
    exec_command(
        capfd,
        "run adminer --port 3333 --detach",
        # "Pulling adminer",
        # f"Creating {project_name}_adminer_run",
        "You can access Adminer interface on: http://localhost:3333",
    )

    # Launch Swagger UI with default port
    exec_command(
        capfd,
        "run swaggerui --pull --detach",
        "Pulling image for swaggerui...",
        "You can access SwaggerUI web page here: http://localhost:7777",
    )

    exec_command(
        capfd,
        "remove swaggerui",
        "Service swaggerui removed",
    )

    # Launch Swagger UI with custom port
    exec_command(
        capfd,
        "run swaggerui --port 4444 --detach",
        "You can access SwaggerUI web page here: http://localhost:4444",
    )

    # This fails if the interfaces are not running, i.e. in case of a post-start crash
    # Introduced after a bug due to the tty setting in volatile containers
    # that made running the interfaces fail on GitHub Actions
    exec_command(
        capfd,
        "remove adminer swaggerui",
        "Service adminer removed",
        "Service swaggerui removed",
    )

    # Test SwaggerUI and Adminer in production mode
    exec_command(
        capfd,
        "--prod init -f",
        "Created default .projectrc file",
        "Project initialized",
    )

    exec_command(
        capfd,
        "--prod run swaggerui --port 5555 --detach",
        "You can access SwaggerUI web page here: https://localhost:5555",
    )

    exec_command(
        capfd,
        "--prod run adminer --port 6666 --detach",
        "You can access Adminer interface on: https://localhost:6666",
    )
Example 10
def test_all(capfd: Capture) -> None:

    execute_outside(capfd, "start")
    if not Configuration.swarm_mode:
        execute_outside(capfd, "stop")

    project_name = "first"
    create_project(
        capfd=capfd,
        name=project_name,
        auth="neo4j",
        frontend="angular",
    )

    init_project(capfd)

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "start",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        start_registry(capfd)

    exec_command(
        capfd,
        "start backend invalid",
        "No such service: invalid",
    )

    exec_command(
        capfd,
        "start backend",
        f"image, execute {colors.RED}rapydo pull backend",
    )

    pull_images(capfd)

    docker = Docker()

    if Configuration.swarm_mode:

        # Deploy a sub-stack
        exec_command(
            capfd,
            "start backend",
            "Enabled services: backend",
            "Stack started",
        )

        # Only backend is expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is None

        # Once a stack has been started in swarm mode, it's not possible
        # to re-deploy another stack
        # exec_command(
        #     capfd,
        #     "start",
        #     "A stack is already running",
        #     f"Stop it with {colors.RED}rapydo remove{colors.RESET} "
        #     "if you want to start a new stack",
        # )

        # Deploy an additional sub-stack
        exec_command(
            capfd,
            "start neo4j",
            "Enabled services: neo4j",
            "Stack started",
        )

        # In swarm mode a new stack replaces the previous one
        # => Only neo4j is expected to be running
        assert docker.get_container("backend") is None
        assert docker.get_container("neo4j") is not None

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        # Deploy the full stack
        exec_command(
            capfd,
            "start",
            "Stack started",
        )

        # Now both backend and neo4j are expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is not None

        # ############################
        # Verify bind volumes checks #
        # ############################

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        data_folder = DATA_DIR.joinpath(project_name)
        karma_folder = data_folder.joinpath("karma")

        # Delete data/project_name/karma and it will be recreated
        assert karma_folder.exists()
        shutil.rmtree(karma_folder)
        assert not karma_folder.exists()

        # Set the data folder read-only
        data_folder.chmod(0o550)

        # The missing folder can't be recreated because permission is denied
        exec_command(
            capfd,
            "start frontend",
            "A bind folder is missing and can't be automatically created: ",
            f"/data/{project_name}/karma",
        )
        assert not karma_folder.exists()

        # Restore RW permissions
        data_folder.chmod(0o770)

        exec_command(
            capfd,
            "start frontend",
            "A bind folder was missing and was automatically created: ",
            f"/data/{project_name}/karma",
            "Stack started",
        )
        assert karma_folder.exists()
    else:

        # Deploy a sub-stack
        exec_command(
            capfd,
            "start backend",
            "Enabled services: backend",
            "Stack started",
        )

        # Only backend is expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is None

        # Deploy an additional sub-stack
        exec_command(
            capfd,
            "start neo4j",
            "Enabled services: neo4j",
            "Stack started",
        )

        # In compose mode additional stacks are aggregated
        # => both backend and neo4j are expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is not None

        # exec_command(
        #     capfd,
        #     "start",
        #     "A stack is already running.",
        # )
        exec_command(
            capfd,
            "start",
            "Stack started",
        )
Example 11
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "backup mariadb")
    execute_outside(capfd, "restore mariadb")

    backup_folder = BACKUP_DIR.joinpath("mariadb")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="mysql",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup mariadb",
        f"image, execute {colors.RED}rapydo pull mariadb",
    )
    exec_command(
        capfd,
        "restore mariadb",
        f"image, execute {colors.RED}rapydo pull mariadb",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "sqlalchemy")

    # This will initialize mariadb
    exec_command(capfd, "shell backend 'restapi init'")

    def exec_query(query: str) -> str:

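        # Build a nested "rapydo shell" command that runs the given SQL query with
        # the mysql client inside the mariadb container; the escaping below is
        # needed because the query crosses three levels of quoting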
        command = 'shell mariadb "'
        command += 'sh -c \'mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -D"$MYSQL_DATABASE" '
        command += f'-e \\"{query};\\"'
        # This is to close the sh -c 'command'
        command += "'"
        # This is to close the shell "command"
        command += '"'

        return command

    # Verify the initialization
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tUser",
    )

    exec_command(
        capfd,
        "backup mariadb",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # A second backup is needed to test backup retention
    exec_command(
        capfd,
        "backup mariadb",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup mariadb --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup mariadb --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    exec_command(
        capfd,
        "backup mariadb --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup mariadb --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Create an additional backup to test the deletion (there are now 3 backups)
    exec_command(
        capfd,
        "backup mariadb",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )
    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup mariadb --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.tar").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.tar.gz").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup mariadb --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2: all files above are ignored as they don't match the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup mariadb",
        "The backup procedure requires mariadb running, please start your stack",
    )

    exec_command(
        capfd,
        "restore mariadb",
        "Please specify one of the following backup:",
        ".tar.gz",
    )
    exec_command(
        capfd,
        "restore mariadb invalid",
        "Invalid backup file, data/backup/mariadb/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore mariadb",
            "No backup found, the following folder "
            "does not exist: data/backup/mariadb",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore mariadb",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/mariadb")

        exec_command(
            capfd,
            "restore mariadb",
            "No backup found, data/backup/mariadb is empty",
        )

        open("data/backup/mariadb/test.tar.gz", "a").close()

        exec_command(
            capfd,
            "restore mariadb",
            "Please specify one of the following backup:",
            "test.tar.gz",
        )

        os.remove("data/backup/mariadb/test.tar.gz")

    files = os.listdir("data/backup/mariadb")
    files = [f for f in files if f.endswith(".tar.gz")]
    files.sort()
    mariadb_dump_file = files[-1]

    exec_command(capfd, "start", "Stack started")

    # The restore is not allowed without --force while mariadb is running
    exec_command(
        capfd,
        f"restore mariadb {mariadb_dump_file}",
        "MariaDB is running and the restore will temporary stop it. "
        "If you want to continue add --force flag",
    )

    # Here we test the restore procedure:
    # 1) verify some data in the database
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tUser",
    )

    # 2) Modify the data
    exec_command(
        capfd,
        exec_query("update role SET description=name"),
    )
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tnormal_user",
    )

    # 3) restore the dump
    exec_command(
        capfd,
        f"restore mariadb {mariadb_dump_file} --force",
        "Starting restore on mariadb...",
        "Opening backup file",
        "Removing current datadir",
        "Restoring the backup",
        "...done",
        "completed OK!",
        "Removing the temporary uncompressed folder",
        f"Restore from data/backup/mariadb/{mariadb_dump_file} completed",
    )

    if Configuration.swarm_mode:
        time.sleep(5)

    # 4) verify the data matches point 1 again (restore completed)
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tUser",
    )
Example 12
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "backup rabbit")
    execute_outside(capfd, "restore rabbit")

    backup_folder = BACKUP_DIR.joinpath("rabbit")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
        services=["rabbit"],
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup rabbit",
        f"image, execute {colors.RED}rapydo pull rabbit",
    )
    exec_command(
        capfd,
        "restore rabbit",
        f"image, execute {colors.RED}rapydo pull rabbit",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "rabbitmq")

    # Just some extra delay, rabbit is a slow starter
    time.sleep(5)

    # NOTE: q = rabbitmq.__name__ is just a fixed name used to test the queue
    # without the need to introduce further nested " or '
    query_queue = "shell backend \"/usr/bin/python3 -c 'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; r = rabbitmq.get_instance();print(q, r.queue_exists(q));'\""
    create_queue = "shell backend \"/usr/bin/python3 -c 'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; r = rabbitmq.get_instance(); r.create_queue(q);'\""
    delete_queue = "shell backend \"/usr/bin/python3 -c 'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; r = rabbitmq.get_instance(); r.delete_queue(q);'\""

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq False")

    exec_command(
        capfd,
        create_queue,
    )

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq True")

    # Backup command
    exec_command(
        capfd,
        "backup rabbit",
        "RabbitMQ is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )
    exec_command(
        capfd,
        "backup rabbit --force --restart backend",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )
    # This is to verify that --force restarted rabbit
    exec_command(
        capfd,
        "backup rabbit",
        "RabbitMQ is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup rabbit",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    exec_command(
        capfd,
        "backup rabbit --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup rabbit --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Create an additional backup to test the deletion (there are now 3 backups)
    exec_command(
        capfd,
        "backup rabbit",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup rabbit --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2: all files above are ignored as they don't match the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    exec_command(capfd, "start backend rabbit")

    # Just some extra delay, rabbit is a slow starter
    if Configuration.swarm_mode:
        time.sleep(20)
    else:
        time.sleep(10)

    exec_command(
        capfd,
        delete_queue,
    )

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq False")

    # Restore command
    exec_command(
        capfd,
        "restore rabbit",
        "Please specify one of the following backup:",
        ".tar.gz",
    )

    exec_command(
        capfd,
        "restore rabbit invalid",
        "Invalid backup file, data/backup/rabbit/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore rabbit",
            "No backup found, the following folder "
            "does not exist: data/backup/rabbit",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore rabbit",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/rabbit")

        exec_command(
            capfd,
            "restore rabbit",
            "No backup found, data/backup/rabbit is empty",
        )

        open("data/backup/rabbit/test.gz", "a").close()

        exec_command(
            capfd,
            "restore rabbit",
            "No backup found, data/backup/rabbit is empty",
        )

        open("data/backup/rabbit/test.tar.gz", "a").close()

        exec_command(
            capfd,
            "restore rabbit",
            "Please specify one of the following backup:",
            "test.tar.gz",
        )

        os.remove("data/backup/rabbit/test.gz")
        os.remove("data/backup/rabbit/test.tar.gz")

    # Test restore on rabbit (requires rabbit to be down)
    files = os.listdir("data/backup/rabbit")
    files = [f for f in files if f.endswith(".tar.gz")]
    files.sort()
    rabbit_dump_file = files[-1]

    exec_command(capfd, "remove")
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file}",
        "Starting restore on rabbit...",
        f"Restore from data/backup/rabbit/{rabbit_dump_file} completed",
    )

    exec_command(capfd, "start", "Stack started")
    # 4) verify the data matches point 1 again (restore completed);
    # postponed because rabbit needs time to start...

    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file}",
        "RabbitMQ is running and the restore will temporary stop it.",
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file} --force --restart backend",
        "Starting restore on rabbit...",
        f"Restore from data/backup/rabbit/{rabbit_dump_file} completed",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )

    # Wait for rabbit to completely start up
    service_verify(capfd, "rabbitmq")

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq True")
Example 13
def test_password_redis(capfd: Capture, faker: Faker) -> None:

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="no",
        frontend="no",
        services=["redis"],
    )

    init_project(capfd, "-e API_AUTOSTART=1")
    start_registry(capfd)

    now = datetime.now()
    today = now.strftime("%Y-%m-%d")

    exec_command(
        capfd,
        "password",
        f"redis      REDIS_PASSWORD         {colors.RED}N/A",
    )

    redis_pass1 = get_variable_from_projectrc("REDIS_PASSWORD")
    exec_command(
        capfd,
        "password redis --random",
        "redis was not running, restart is not needed",
        "The password of redis has been changed. ",
        "Please find the new password into your .projectrc file as "
        "REDIS_PASSWORD variable",
    )
    redis_pass2 = get_variable_from_projectrc("REDIS_PASSWORD")
    assert redis_pass1 != redis_pass2

    exec_command(
        capfd,
        "password",
        f"redis      REDIS_PASSWORD         {colors.GREEN}{today}",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "redis")

    backend_start_date = get_container_start_date(capfd, "backend")
    redis_start_date = get_container_start_date(capfd, "redis")

    exec_command(
        capfd,
        "password redis --random",
        "redis was running, restarting services...",
        "The password of redis has been changed. ",
        "Please find the new password into your .projectrc file as "
        "REDIS_PASSWORD variable",
    )

    redis_pass3 = get_variable_from_projectrc("REDIS_PASSWORD")
    assert redis_pass2 != redis_pass3

    backend_start_date2 = get_container_start_date(capfd, "backend", wait=True)
    redis_start_date2 = get_container_start_date(capfd, "redis", wait=False)

    # Verify that both backend and redis are restarted
    assert backend_start_date2 != backend_start_date
    assert redis_start_date2 != redis_start_date

    service_verify(capfd, "redis")

    exec_command(
        capfd,
        "password",
        f"redis      REDIS_PASSWORD         {colors.GREEN}{today}",
    )

    mypassword = faker.pystr()
    exec_command(
        capfd,
        f"password redis --password {mypassword}",
        "The password of redis has been changed. ",
    )
    assert mypassword == get_variable_from_projectrc("REDIS_PASSWORD")

    exec_command(
        capfd,
        "password --show",
        mypassword,
    )

    if Configuration.swarm_mode:
        time.sleep(5)

    service_verify(capfd, "redis")

    future = now + timedelta(days=PASSWORD_EXPIRATION + 1)
    expired = (now + timedelta(days=PASSWORD_EXPIRATION)).strftime("%Y-%m-%d")

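    # freeze_time (from the freezegun package) fakes a date beyond the expiration
    # window, so the password changed today is reported as expired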
    with freeze_time(future):
        exec_command(
            capfd,
            "password",
            f"redis      REDIS_PASSWORD         {colors.RED}{today}",
        )

        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            f"REDIS_PASSWORD is expired on {expired}",
        )

    # Cleanup the stack for the next test
    exec_command(capfd, "remove", "Stack removed")
Example 14
def test_password_registry(capfd: Capture, faker: Faker) -> None:

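    # The registry (and its password) only applies to swarm mode: skip otherwise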
    if not Configuration.swarm_mode:
        return None

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="postgres",
        frontend="no",
    )

    init_project(capfd)

    now = datetime.now()
    today = now.strftime("%Y-%m-%d")

    exec_command(
        capfd,
        "password",
        f"registry   REGISTRY_PASSWORD      {colors.RED}N/A",
    )
    registry_pass1 = get_variable_from_projectrc("REGISTRY_PASSWORD")

    docker.container.remove(REGISTRY, force=True)

    exec_command(
        capfd,
        "password registry --random",
        "registry was not running, restart is not needed",
        "The password of registry has been changed. ",
        "Please find the new password into your .projectrc file as "
        "REGISTRY_PASSWORD variable",
    )
    registry_pass2 = get_variable_from_projectrc("REGISTRY_PASSWORD")
    assert registry_pass1 != registry_pass2

    start_registry(capfd)

    exec_command(
        capfd,
        "password",
        f"registry   REGISTRY_PASSWORD      {colors.GREEN}{today}",
    )

    exec_command(capfd, "images", "This registry contains ")

    registry_start_date = get_container_start_date(capfd, "registry", wait=True)

    exec_command(
        capfd,
        "password registry --random",
        "registry was running, restarting services...",
        "The password of registry has been changed. ",
        "Please find the new password into your .projectrc file as "
        "REGISTRY_PASSWORD variable",
    )

    registry_pass3 = get_variable_from_projectrc("REGISTRY_PASSWORD")
    assert registry_pass2 != registry_pass3

    registry_start_date2 = get_container_start_date(capfd, "registry", wait=True)

    assert registry_start_date2 != registry_start_date

    exec_command(capfd, "images", "This registry contains ")

    exec_command(
        capfd,
        "password",
        f"registry   REGISTRY_PASSWORD      {colors.GREEN}{today}",
    )

    future = now + timedelta(days=PASSWORD_EXPIRATION + 1)
    expired = (now + timedelta(days=PASSWORD_EXPIRATION)).strftime("%Y-%m-%d")

    with freeze_time(future):
        exec_command(
            capfd,
            "password",
            f"registry   REGISTRY_PASSWORD      {colors.GREEN}{today}",
        )

        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            f"REGISTRY_PASSWORD is expired on {expired}",
        )

    # This is needed, otherwise the following tests will be unable to start
    # a new registry instance and will fail with registry auth errors
    exec_command(capfd, "remove registry", "Service registry removed")
Example 15
def test_all(capfd: Capture) -> None:

    execute_outside(capfd, "logs backend")

    create_project(
        capfd=capfd,
        name="first",
        auth="postgres",
        frontend="angular",
    )
    init_project(capfd)

    start_registry(capfd)

    pull_images(capfd)
    start_project(capfd)

    # Invalid services are refused
    exec_command(
        capfd,
        "logs --tail 1 invalid",
        "No such service: invalid",
    )

    now = datetime.now()

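    # Interrupt the blocking "logs --follow" after 5 seconds via SIGALRM;
    # mock_KeyboardInterrupt presumably raises KeyboardInterrupt to simulate Ctrl+C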
    signal.signal(signal.SIGALRM, mock_KeyboardInterrupt)
    signal.alarm(5)
    # Here using main services option
    exec_command(
        capfd,
        "logs --tail 10 --follow backend",
        "REST API backend server is ready to be launched",
    )
    end = datetime.now()

    assert (end - now).seconds >= 4
    signal.alarm(0)

    exec_command(
        capfd,
        "logs backend",
        "REST API backend server is ready to be launched",
    )

    exec_command(
        capfd,
        "logs --tail 1",
        "Enabled services: backend, frontend, postgres",
    )

    exec_command(
        capfd,
        "logs --tail 1 backend",
        "Enabled services: backend",
    )

    exec_command(
        capfd,
        "logs --tail 1 frontend",
        "Enabled services: frontend",
    )

    exec_command(
        capfd,
        "logs --tail 1 backend frontend",
        "Enabled services: backend, frontend",
    )

    exec_command(
        capfd,
        "logs --tail 1 frontend backend",
        "Enabled services: backend, frontend",
    )

    exec_command(
        capfd,
        "logs --tail 1 backend invalid",
        "No such service: invalid",
    )

    # Backend logs are never timestamped
    exec_command(
        capfd,
        "logs --tail 20 backend",
        # Logs are not prefixed because only one service is shown
        "Testing mode",
    )

    # Debug code... no logs in swarm mode for the frontend, even after a 20s wait...
    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "logs --tail 10 frontend",
        )
    else:
        timestamp = now.strftime("%Y-%m-%dT")
        # Frontend logs are always timestamped
        exec_command(
            capfd,
            "logs --tail 10 frontend",
            # Logs are not prefixed because only one service is shown
            f"{timestamp}",
        )

    # Follow flag is not supported in swarm mode with multiple services
    if Configuration.swarm_mode:
        # Following multiple services is not supported in swarm mode
        exec_command(
            capfd,
            "logs --follow",
            "Follow flag is not supported on multiple services",
        )

        exec_command(
            capfd,
            "logs --follow backend frontend",
            "Follow flag is not supported on multiple services",
        )
Example 16
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "ssl")

    project = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project,
        auth="neo4j",
        frontend="no",
        services=["rabbit"],
    )
    pconf = f"projects/{project}/project_configuration.yaml"

    exec_command(
        capfd,
        "--prod init -f",
        "Created default .projectrc file",
        "Project initialized",
    )

    start_registry(capfd)

    exec_command(
        capfd,
        "ssl",
        f"image, execute {colors.RED}rapydo pull proxy",
    )

    exec_command(
        capfd,
        "--prod pull --quiet",
        "Base images pulled from docker hub",
    )

    exec_command(
        capfd,
        "ssl",
        "The proxy is not running, start your stack or try with "
        f"{colors.RED}rapydo ssl --volatile",
    )

    # Before creating SSL certificates, rabbit and neo4j should not be able to start
    exec_command(
        capfd,
        "run --debug rabbit",
        "SSL mandatory file not found: /ssl/real/fullchain1.pem",
    )

    exec_command(
        capfd,
        "run --debug neo4j",
        "SSL mandatory file not found: /ssl/real/fullchain1.pem",
    )

    exec_command(
        capfd,
        "ssl --volatile",
        "Creating a self signed SSL certificate",
        "Self signed SSL certificate successfully created",
        # Just to verify that the default does not change
        "Generating DH parameters, 1024 bit long safe prime, generator 2",
    )

    # Start the stack to verify certificate creation while services are running
    exec_command(
        capfd,
        "--prod start",
    )

    # Needed because the next command requires rabbit to be already started,
    # otherwise it will fail with:
    # Error: unable to perform an operation on node 'rabbit@rabbit'.
    # Please see diagnostics information and suggestions below.
    if Configuration.swarm_mode:
        # 60!? :| It still fails after raising to 30... Let's double it!!
        # 90!? :| It still fails after raising to 60!!
        time.sleep(90)
        # DEBUG CODE
        exec_command(capfd, "logs rabbit")
    else:
        time.sleep(5)

    service_verify(capfd, "rabbitmq")

    exec_command(
        capfd,
        "ssl --no-tty",
        "--no-tty option is deprecated, you can stop using it",
        "Creating a self signed SSL certificate",
        "Self signed SSL certificate successfully created",
        "Neo4j is running, a full restart is needed. NOT IMPLEMENTED YET.",
        "RabbitMQ is running, executing command to refresh the certificate",
        "New certificate successfully enabled",
    )

    exec_command(
        capfd,
        "ssl --chain-file /file",
        "Invalid chain file (you provided /file)",
    )
    exec_command(
        capfd,
        "ssl --key-file /file",
        "Invalid chain file (you provided none)",
    )

    exec_command(
        capfd,
        f"ssl --chain-file {pconf}",
        "Invalid key file (you provided none)",
    )
    exec_command(
        capfd,
        f"ssl --chain-file {pconf} --key-file /file",
        "Invalid key file (you provided /file)",
    )
    exec_command(
        capfd,
        f"ssl --chain-file {pconf} --key-file {pconf}",
        "Unable to automatically perform the requested operation",
        "You can execute the following commands by your-self:",
    )
Example 17
def test_password_rabbit(capfd: Capture, faker: Faker) -> None:

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="no",
        frontend="no",
        services=["rabbit"],
    )

    init_project(capfd, "-e API_AUTOSTART=1")
    start_registry(capfd)

    now = datetime.now()
    today = now.strftime("%Y-%m-%d")

    exec_command(
        capfd,
        "password rabbit --random",
        "Can't update rabbit because it is not running. Please start your stack",
    )

    exec_command(
        capfd,
        "password",
        f"rabbit     RABBITMQ_PASSWORD      {colors.RED}N/A",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "rabbitmq")

    #  ############## RABBIT #####################

    backend_start_date = get_container_start_date(capfd, "backend")
    rabbit_start_date = get_container_start_date(capfd, "rabbit")
    rabbit_pass1 = get_variable_from_projectrc("RABBITMQ_PASSWORD")

    exec_command(
        capfd,
        "password rabbit --random",
        "rabbit was running, restarting services...",
        "The password of rabbit has been changed. ",
        "Please find the new password into your .projectrc file as "
        "RABBITMQ_PASSWORD variable",
    )

    rabbit_pass2 = get_variable_from_projectrc("RABBITMQ_PASSWORD")
    assert rabbit_pass1 != rabbit_pass2

    backend_start_date2 = get_container_start_date(capfd, "backend", wait=True)
    rabbit_start_date2 = get_container_start_date(capfd, "rabbit", wait=False)

    # Verify that both backend and rabbit are restarted
    assert backend_start_date2 != backend_start_date
    assert rabbit_start_date2 != rabbit_start_date

    service_verify(capfd, "rabbitmq")

    exec_command(
        capfd,
        "password",
        f"rabbit     RABBITMQ_PASSWORD      {colors.GREEN}{today}",
    )

    # Needed to prevent random:
    # failed to update service xyz_rabbit:
    # Error response from daemon:
    # rpc error: code = Unknown desc = update out of sequence
    if Configuration.swarm_mode:
        time.sleep(3)

    mypassword = faker.pystr()
    exec_command(
        capfd,
        f"password rabbit --password {mypassword}",
        "The password of rabbit has been changed. ",
    )
    assert mypassword == get_variable_from_projectrc("RABBITMQ_PASSWORD")

    exec_command(
        capfd,
        "password --show",
        mypassword,
    )

    if Configuration.swarm_mode:
        time.sleep(5)

    service_verify(capfd, "rabbitmq")

    future = now + timedelta(days=PASSWORD_EXPIRATION + 1)
    expired = (now + timedelta(days=PASSWORD_EXPIRATION)).strftime("%Y-%m-%d")

    with freeze_time(future):
        exec_command(
            capfd,
            "password",
            f"rabbit     RABBITMQ_PASSWORD      {colors.RED}{today}",
        )

        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            f"RABBITMQ_PASSWORD is expired on {expired}",
        )

    # Cleanup the stack for the next test
    exec_command(capfd, "remove", "Stack removed")
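
The password tests above read the generated secrets back through get_variable_from_projectrc. A minimal sketch of such a helper is shown below, assuming .projectrc contains simple "KEY: value" lines; the file layout and the fallback behaviour are assumptions, not the suite's actual implementation.

import re
from pathlib import Path
from typing import Optional


def get_variable_from_projectrc(
    variable: str, projectrc: Path = Path(".projectrc")
) -> Optional[str]:
    # Scan the file for a "VARIABLE: value" line and return the value, if any
    pattern = re.compile(rf"^\s*{re.escape(variable)}\s*:\s*(.+?)\s*$")
    for line in projectrc.read_text().splitlines():
        match = pattern.match(line)
        if match:
            return match.group(1)
    return None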
Esempio n. 18
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "shell backend ls")

    create_project(
        capfd=capfd,
        name="first",
        auth="no",
        frontend="angular",
        services=["redis", "fail2ban"],
    )
    init_project(capfd)

    start_registry(capfd)

    pull_images(capfd)
    start_project(capfd)

    exec_command(
        capfd, "shell invalid", "No running container found for invalid service"
    )

    exec_command(
        capfd,
        "shell --no-tty backend invalid",
        "--no-tty option is deprecated, you can stop using it",
    )

    exec_command(
        capfd,
        "shell backend invalid",
        "The command execution was terminated by command cannot be invoked. "
        "Exit code is 126",
    )

    exec_command(
        capfd,
        'shell backend "bash invalid"',
        "The command execution was terminated by command not found. "
        "Exit code is 127",
    )

    exec_command(
        capfd,
        "shell backend hostname",
        "backend-server",
    )

    signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(2)
    exec_command(
        capfd,
        "shell backend --default-command",
        "Time is up",
    )
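    # signal_handler is provided elsewhere in the test suite and is not shown in
    # this snippet; a minimal equivalent (an assumption, not the actual
    # implementation) would simply raise when the alarm fires, so that the
    # blocking command is interrupted and "Time is up" is reported:
    #
    #     def signal_handler(signum: int, frame: Any) -> None:
    #         raise TimeoutError("Time is up")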

    # This can't work on GitHub Actions due to the lack of tty
    # signal.signal(signal.SIGALRM, handler)
    # signal.alarm(2)
    # exec_command(
    #     capfd,
    #     "shell backend",
    #     # "developer@backend-server:[/code]",
    #     "Time is up",
    # )

    # Testing default users. I didn't include all the containers because:
    #   1. this would greatly slow down this test for a very small benefit
    #   2. checking for 'postgres' in the output of shell postgres whoami is
    #      trivial because it is always in the output, due to the echo of the command
    exec_command(
        capfd,
        "shell backend whoami",
        "developer",
    )

    exec_command(
        capfd,
        "shell frontend whoami",
        "node",
    )

    # Added because fail2ban is deployed in global mode, so the container name is
    # different and this can make the command fail
    # (as happened before the introduction of this test)
    exec_command(
        capfd,
        "shell fail2ban whoami",
        "root",
    )

    exec_command(
        capfd,
        "remove",
        "Stack removed",
    )

    exec_command(
        capfd,
        "shell backend hostname",
        "Requested command: hostname with user: developer",
        "No running container found for backend service",
    )

    exec_command(
        capfd,
        "shell backend --default",
        "Requested command: restapi launch with user: developer",
        "No running container found for backend service",
    )

    exec_command(
        capfd,
        "shell backend --replica 1 --default",
        "Requested command: restapi launch with user: developer",
        "No running container found for backend service",
    )

    exec_command(
        capfd,
        "shell backend --replica 2 --default",
        "Requested command: restapi launch with user: developer",
        "Replica number 2 not found for backend service",
    )

    if Configuration.swarm_mode:
        service = "backend"

        exec_command(
            capfd,
            "start backend",
            "Stack started",
        )

        exec_command(
            capfd,
            "scale backend=2 --wait",
            "first_backend scaled to 2",
            "Service converged",
        )
    else:

        service = "redis"
        exec_command(
            capfd,
            "scale redis=2",
            "Scaling services: redis=2...",
            "Services scaled: redis=2",
        )

    docker = Docker()
    container1 = docker.get_container(service, slot=1)
    container2 = docker.get_container(service, slot=2)
    assert container1 is not None
    assert container2 is not None
    assert container1 != container2

    string1 = faker.pystr(min_chars=30, max_chars=30)
    string2 = faker.pystr(min_chars=30, max_chars=30)

    docker.client.container.execute(
        container1[0],
        command=["touch", f"/tmp/{string1}"],
        tty=False,
        detach=False,
    )

    docker.client.container.execute(
        container2[0],
        command=["touch", f"/tmp/{string2}"],
        tty=False,
        detach=False,
    )

    exec_command(capfd, f"shell {service} --replica 1 'ls /tmp/'", string1)

    exec_command(capfd, f"shell {service} --replica 2 'ls /tmp/'", string2)

    exec_command(
        capfd,
        f"shell {service} mycommand --replica 2 --broadcast",
        "--replica and --broadcast options are not compatible",
    )

    exec_command(
        capfd,
        f"shell {service} --broadcast 'ls /tmp/'",
        string1,
        string2,
    )

    exec_command(
        capfd,
        "remove",
        "Stack removed",
    )

    exec_command(
        capfd,
        f"shell {service} mycommand --broadcast",
        f"No running container found for {service} service",
    )
Esempio n. 19
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "list env")
    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="postgres",
        frontend="no",
        services=["redis"],
        extra="--env CUSTOMVAR1=mycustomvalue --env CUSTOMVAR2=mycustomvalue",
    )
    init_project(capfd)

    # Some tests with list
    exec_command(
        capfd,
        "list",
        "Missing argument 'ELEMENT_TYPE:{env|services|submodules}'. Choose from:",
    )

    exec_command(
        capfd,
        "list invalid",
        "Invalid value for",
        "'invalid' is not one of 'env', 'services', 'submodules'",
    )

    exec_command(
        capfd,
        "list env",
        "List env variables:",
        "ACTIVATE_ALCHEMY",
        "CUSTOMVAR1",
        "CUSTOMVAR2",
        "mycustomvalue",
    )
    exec_command(
        capfd,
        "list submodules",
        "List of submodules:",
    )

    exec_command(
        capfd,
        "list services",
        "List of active services:",
        "backend",
        "postgres",
        "redis",
        "N/A",
    )

    start_registry(capfd)

    pull_images(capfd)

    start_project(capfd)

    exec_command(
        capfd,
        "list services",
        "List of active services:",
        "backend",
        "postgres",
        "redis",
        "running",
    )
Esempio n. 20
def test_scale(capfd: Capture) -> None:

    execute_outside(capfd, "scale x=1")

    create_project(
        capfd=capfd,
        name="first",
        auth="postgres",
        frontend="no",
        services=["redis"],
    )
    init_project(capfd)

    # backend, postgres, redis
    BASE_SERVICE_NUM = 3

    if Configuration.swarm_mode:

        exec_command(
            capfd,
            "scale backend=2",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        start_registry(capfd)

        # Add the registry
        BASE_SERVICE_NUM += 1

    exec_command(
        capfd,
        "scale backend=2",
        f"image, execute {colors.RED}rapydo pull backend",
    )

    pull_images(capfd)

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "scale backend=2",
            "No such service: first_backend, have you started your stack?",
        )

    start_project(capfd)

    assert count_running_containers() == BASE_SERVICE_NUM

    exec_command(
        capfd,
        "scale redis=x",
        "Invalid number of replicas: x",
    )

    if Configuration.swarm_mode:

        exec_command(
            capfd,
            "scale backend=2 --wait",
            "first_backend scaled to 2",
            "Service converged",
        )

        assert count_running_containers() == BASE_SERVICE_NUM + 1

        exec_command(
            capfd,
            "status",
            " [2]",
        )

        exec_command(
            capfd,
            "scale backend",
            "first_backend scaled to 1",
        )

        # The backend instances are still 2 because the service has not converged yet
        # (the --wait flag was not included in the previous command)
        assert count_running_containers() == BASE_SERVICE_NUM + 1

        # So just sleep for a while to let the service converge
        time.sleep(3)

        assert count_running_containers() == BASE_SERVICE_NUM

        exec_command(
            capfd,
            "-e DEFAULT_SCALE_BACKEND=3 scale backend --wait",
            "first_backend scaled to 3",
            "Service converged",
        )

        assert count_running_containers() == BASE_SERVICE_NUM + 2

        exec_command(
            capfd,
            "status",
            " [3]",
        )

        with open(".projectrc", "a") as f:
            f.write("\n      DEFAULT_SCALE_BACKEND: 4\n")

        exec_command(
            capfd,
            "scale backend",
            "first_backend scaled to 4",
        )

        # Just wait for a while for all tasks to start, necessary because the
        # previous command did not include the --wait flag
        time.sleep(2)

        assert count_running_containers() == BASE_SERVICE_NUM + 3

        # This should restart all the replicas.
        exec_command(
            capfd,
            "start",
        )

        # Verify that all the replicas are still running after the restart
        exec_command(
            capfd,
            "start --force",
        )

        # Just wait for a while for all tasks to start, necessary because the
        # previous command did not include the --wait flag
        time.sleep(2)

        # Still not working
        # assert count_running_containers() == BASE_SERVICE_NUM + 3

        # exec_command(
        #     capfd,
        #     "scale backend=0 --wait",
        #     "first_backend scaled to 0",
        # )

        # assert count_running_containers() == BASE_SERVICE_NUM - 1

        exec_command(
            capfd,
            "scale redis=2",
            "Service redis is not guaranteed to support the scale, "
            "can't accept the request",
        )

    else:

        exec_command(
            capfd,
            "scale redis",
            "Scaling services: redis=1...",
            "Services scaled: redis=1",
        )

        assert count_running_containers() == BASE_SERVICE_NUM

        exec_command(
            capfd,
            "-e DEFAULT_SCALE_REDIS=2 scale redis",
            "Scaling services: redis=2...",
            "Services scaled: redis=2",
        )

        assert count_running_containers() == BASE_SERVICE_NUM + 1

        exec_command(
            capfd,
            "scale redis=3",
            "Scaling services: redis=3...",
            "Services scaled: redis=3",
        )

        assert count_running_containers() == BASE_SERVICE_NUM + 2

        with open(".projectrc", "a") as f:
            f.write("\n      DEFAULT_SCALE_REDIS: 4\n")

        exec_command(
            capfd,
            "scale redis",
            "Scaling services: redis=4...",
            "Services scaled: redis=4",
        )

        assert count_running_containers() == BASE_SERVICE_NUM + 3

        exec_command(
            capfd,
            "scale redis=1",
            "Scaling services: redis=1...",
            "Services scaled: redis=1",
        )

        assert count_running_containers() == BASE_SERVICE_NUM

        exec_command(
            capfd,
            "scale redis=2",
            "Scaling services: redis=2...",
            "Services scaled: redis=2",
        )

        assert count_running_containers() == BASE_SERVICE_NUM + 1

        # This should restart all the replicas.
        exec_command(
            capfd,
            "start",
        )

        # Verify that 2 replicas are still running after the restart
        exec_command(
            capfd,
            "start --force",
        )
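
test_scale repeatedly checks the number of running containers through count_running_containers. A plausible sketch of that helper using python-on-whales is shown below; it is an assumption, and the real implementation may filter differently (for example by project or compose label).

from python_on_whales import docker


def count_running_containers() -> int:
    # Count the containers currently in the "running" state
    return len(docker.container.list(filters={"status": "running"}))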
Esempio n. 21
def test_password_flower(capfd: Capture, faker: Faker) -> None:

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="no",
        frontend="no",
        services=["flower"],
    )

    init_project(capfd, "-e API_AUTOSTART=1")
    start_registry(capfd)

    now = datetime.now()
    today = now.strftime("%Y-%m-%d")

    exec_command(
        capfd,
        "password",
        f"flower     FLOWER_PASSWORD        {colors.RED}N/A",
    )

    flower_pass1 = get_variable_from_projectrc("FLOWER_PASSWORD")
    exec_command(
        capfd,
        "password flower --random",
        "flower was not running, restart is not needed",
        "The password of flower has been changed. ",
        "Please find the new password into your .projectrc file as "
        "FLOWER_PASSWORD variable",
    )
    flower_pass2 = get_variable_from_projectrc("FLOWER_PASSWORD")
    assert flower_pass1 != flower_pass2

    exec_command(
        capfd,
        "password",
        f"flower     FLOWER_PASSWORD        {colors.GREEN}{today}",
    )

    pull_images(capfd)
    start_project(capfd)

    flower_start_date = get_container_start_date(capfd, "flower", wait=True)

    exec_command(
        capfd,
        "password flower --random",
        "flower was running, restarting services...",
        "The password of flower has been changed. ",
        "Please find the new password into your .projectrc file as "
        "FLOWER_PASSWORD variable",
    )

    flower_pass3 = get_variable_from_projectrc("FLOWER_PASSWORD")
    assert flower_pass2 != flower_pass3

    flower_start_date2 = get_container_start_date(capfd, "flower", wait=True)

    assert flower_start_date2 != flower_start_date

    exec_command(
        capfd,
        "password",
        f"flower     FLOWER_PASSWORD        {colors.GREEN}{today}",
    )

    mypassword = faker.pystr()
    exec_command(
        capfd,
        f"password flower --password {mypassword}",
        "The password of flower has been changed. ",
    )
    assert mypassword == get_variable_from_projectrc("FLOWER_PASSWORD")

    exec_command(
        capfd,
        "password --show",
        mypassword,
    )

    future = now + timedelta(days=PASSWORD_EXPIRATION + 1)
    expired = (now + timedelta(days=PASSWORD_EXPIRATION)).strftime("%Y-%m-%d")

    with freeze_time(future):
        exec_command(
            capfd,
            "password",
            f"flower     FLOWER_PASSWORD        {colors.RED}{today}",
        )

        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            f"FLOWER_PASSWORD is expired on {expired}",
        )

    # Cleanup the stack for the next test
    exec_command(capfd, "remove", "Stack removed")
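
The freeze_time blocks above verify that a password is reported as expired once PASSWORD_EXPIRATION days have passed since the last change. A small sketch of that rule follows; it is an assumption, and the CLI's actual check may differ in details such as inclusive bounds.

from datetime import date, timedelta


def is_password_expired(changed_on: date, today: date, expiration_days: int) -> bool:
    # A password is considered expired once more than `expiration_days`
    # have elapsed since it was last changed
    return today > changed_on + timedelta(days=expiration_days)


# Example with a 90-day policy: a password changed on 2023-01-01
# is still valid on 2023-03-31 and expired on 2023-04-02
assert not is_password_expired(date(2023, 1, 1), date(2023, 3, 31), 90)
assert is_password_expired(date(2023, 1, 1), date(2023, 4, 2), 90)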
Esempio n. 22
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "backup redis")
    execute_outside(capfd, "restore redis")

    backup_folder = BACKUP_DIR.joinpath("redis")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
        services=["redis"],
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup redis",
        f"image, execute {colors.RED}rapydo pull redis",
    )
    exec_command(
        capfd,
        "restore redis",
        f"image, execute {colors.RED}rapydo pull redis",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "redis")

    key = faker.pystr()
    value1 = f"old-{faker.pystr()}"
    value2 = f"new-{faker.pystr()}"

    # NOTE: q = redis.__name__ is just to have a fixed name to be used to test the
    # queue without the need to introduce further nested " or '
    get_key = f'shell redis "sh -c \'redis-cli --pass "$REDIS_PASSWORD" get {key}\'"'
    set_key1 = (
        f'shell redis "sh -c \'redis-cli --pass "$REDIS_PASSWORD" set {key} {value1}\'"'
    )
    set_key2 = (
        f'shell redis "sh -c \'redis-cli --pass "$REDIS_PASSWORD" set {key} {value2}\'"'
    )

    exec_command(
        capfd,
        set_key1,
    )

    exec_command(capfd, get_key, value1)

    # Backup command on a running Redis
    exec_command(
        capfd,
        "backup redis",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    # Backup command on a stopped Redis
    exec_command(
        capfd,
        "backup redis",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup redis --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup redis --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    exec_command(
        capfd,
        "backup redis --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup redis --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Create an additional backup to test the deletion (now backups are 3)
    exec_command(
        capfd,
        "backup redis",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup redis --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup redis --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2, all the files above are ignored (not matching the pattern)
        "Found 2 backup files, maximum not reached",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    exec_command(capfd, "start backend redis")

    exec_command(
        capfd,
        set_key2,
    )

    exec_command(capfd, get_key, value2)

    # Restore command
    exec_command(
        capfd,
        "restore redis",
        "Please specify one of the following backup:",
        ".tar.gz",
    )

    exec_command(
        capfd,
        "restore redis invalid",
        "Invalid backup file, data/backup/redis/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore redis",
            "No backup found, the following folder "
            "does not exist: data/backup/redis",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore redis",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/redis")

        exec_command(
            capfd,
            "restore redis",
            "No backup found, data/backup/redis is empty",
        )

        open("data/backup/redis/test.gz", "a").close()

        exec_command(
            capfd,
            "restore redis",
            "No backup found, data/backup/redis is empty",
        )

        open("data/backup/redis/test.tar.gz", "a").close()

        exec_command(
            capfd,
            "restore redis",
            "Please specify one of the following backup:",
            "test.tar.gz",
        )

        os.remove("data/backup/redis/test.gz")
        os.remove("data/backup/redis/test.tar.gz")

    # Test restore on redis (requires redis to be down)
    files = os.listdir("data/backup/redis")
    files = [f for f in files if f.endswith(".tar.gz")]
    files.sort()
    redis_dump_file = files[-1]

    exec_command(capfd, "remove redis")
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore redis {redis_dump_file}",
        "Starting restore on redis...",
        f"Restore from data/backup/redis/{redis_dump_file} completed",
    )

    exec_command(capfd, "start", "Stack started")
    # 4) verify that data matches point 1 again (restore completed)
    # postponed because redis needs time to start...

    exec_command(
        capfd,
        f"restore redis {redis_dump_file}",
        "Redis is running and the restore will temporary stop it.",
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        f"restore redis {redis_dump_file} --force --restart backend",
        "Starting restore on redis...",
        f"Restore from data/backup/redis/{redis_dump_file} completed",
    )

    # Wait for redis to completely start up
    service_verify(capfd, "redis")

    exec_command(capfd, get_key, value1)
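
The retention checks above create several files that must not be counted as backups. A plausible filter is sketched below (an assumption, not the controller's code): keep only files with the expected extension and a parseable YYYY_MM_DD-HH_MM_SS timestamp, which also rejects impossible dates such as month 99.

from datetime import datetime
from pathlib import Path
from typing import List


def valid_backups(folder: Path, extension: str = ".tar.gz") -> List[Path]:
    # Keep only files named "<YYYY_MM_DD-HH_MM_SS><extension>" whose timestamp
    # actually parses (so names like "2020_99_01-..." are rejected as well)
    backups = []
    for entry in folder.iterdir():
        if not entry.name.endswith(extension):
            continue
        stamp = entry.name[: -len(extension)]
        try:
            datetime.strptime(stamp, "%Y_%m_%d-%H_%M_%S")
        except ValueError:
            continue
        backups.append(entry)
    return sorted(backups)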
Esempio n. 23
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "pull")
    execute_outside(capfd, "build")

    project2 = random_project_name(faker)
    create_project(
        capfd=capfd,
        name="testbuild",
        auth="no",
        frontend="no",
        services=["rabbit"],
    )
    init_project(capfd)
    create_project(
        capfd=capfd,
        name=project2,
        auth="no",
        frontend="no",
        services=["rabbit"],
    )

    if Configuration.swarm_mode:

        exec_command(
            capfd,
            "pull",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        exec_command(
            capfd,
            "build",
            "docker buildx is installed",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        start_registry(capfd)

    image = f"rapydo/backend:{__version__}"
    exec_command(
        capfd,
        "start",
        f"Missing {image} image, execute {colors.RED}rapydo pull backend",
    )

    exec_command(
        capfd,
        "-e ACTIVATE_RABBIT=0 pull --quiet rabbit",
        "No such service: rabbit",
    )

    exec_command(
        capfd,
        "pull --quiet proxy",
        "No such service: proxy",
    )

    exec_command(
        capfd,
        "pull --quiet",
        "Base images pulled from docker hub",
    )

    # Basic pull
    exec_command(
        capfd,
        "pull xxx",
        "No such service: xxx",
    )

    # --all is useless here... added just to include the parameter in some tests.
    # A true test on such a parameter would be quite complex...
    exec_command(
        capfd,
        "pull --all --quiet backend",
        "Images pulled from docker hub",
    )

    # Add a custom image to extend base rabbit image:
    with open("projects/testbuild/confs/commons.yml", "a") as f:
        f.write("""
services:
  rabbit:
    build: ${PROJECT_DIR}/builds/rabbit
    image: testbuild/rabbit:${RAPYDO_VERSION}

    """)

    # Missing folder
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Build path not found",
    )

    os.makedirs("projects/testbuild/builds/rabbit")

    # Missing Dockerfile
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Build path not found: ",
        "projects/testbuild/builds/rabbit/Dockerfile",
    )

    # Empty Dockerfile
    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        pass
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Invalid Dockerfile, no base image found in ",
        "projects/testbuild/builds/rabbit/Dockerfile",
    )

    # Missing base image
    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        f.write("RUN ls")
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Invalid Dockerfile, no base image found in ",
        "projects/testbuild/builds/rabbit/Dockerfile",
    )

    # Invalid RAPyDo template
    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        f.write("FROM rapydo/invalid")
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Unable to find rapydo/invalid in this project",
        "Please inspect the FROM image in",
        "projects/testbuild/builds/rabbit/Dockerfile",
    )

    image = f"testbuild/rabbit:${__version__}"
    exec_command(
        capfd,
        "start",
        f" image, execute {colors.RED}rapydo build rabbit",
    )

    # Not a RAPyDo child but the build is possible
    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        f.write("FROM ubuntu")
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Custom images built",
    )

    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        f.write(f"""
FROM rapydo/rabbitmq:{__version__}
# Just a simple command to differentiate from the parent
RUN mkdir xyz
""")

    r = Repo(".")
    r.git.add("-A")
    r.git.commit("-a", "-m", "'fake'")

    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        f"naming to docker.io/testbuild/rabbit:{__version__}",
        "Custom images built",
    )

    test_file = Path("projects/testbuild/builds/rabbit/test")
    with open(test_file, "w+") as f:
        f.write("test")

    exec_command(
        capfd,
        "check -i main --no-git",
        "Can't retrieve a commit history for ",
        "Checks completed",
    )

    test_file.unlink()

    exec_command(
        capfd,
        f"-e ACTIVATE_RABBIT=0 -p {project2} build --core rabbit",
        "No such service: rabbit",
    )

    # Rebuild the core rabbit image => the custom rabbit is now obsolete.
    # Please note the use of project2:
    # this way we avoid rebuilding the custom image of testbuild.
    # This simulates a pull updating a core image, making the custom image obsolete

    if Configuration.swarm_mode:
        swarm_push_warn = "Local registry push is not implemented yet for core images"
    else:
        swarm_push_warn = ""

    exec_command(
        capfd,
        f"-p {project2} build --core rabbit",
        "Core images built",
        swarm_push_warn,
        "No custom images to build",
    )
    exec_command(
        capfd,
        "check -i main --no-git",
        f"Obsolete image testbuild/rabbit:{__version__}",
        "built on ",
        " that changed on ",
        f"Update it with: {colors.RED}rapydo build rabbit",
    )

    # Add a second service with the same image to test redundant builds
    with open("projects/testbuild/confs/commons.yml", "a") as f:
        f.write("""
  rabbit2:
    build: ${PROJECT_DIR}/builds/rabbit
    image: testbuild/rabbit:${RAPYDO_VERSION}

    """)

    with open("submodules/build-templates/backend/Dockerfile", "a") as fin:
        fin.write("xyz")
    r = Repo("submodules/build-templates")
    r.git.commit("-a", "-m", "'fake'")
    exec_command(
        capfd,
        "check -i main",
        f"Obsolete image rapydo/backend:{__version__}",
        "built on ",
        " but changed on ",
        f"Update it with: {colors.RED}rapydo pull backend",
    )

    exec_command(capfd, "remove", "Stack removed")

    # Add a third service without a build to verify that pull includes it
    # and pulls its base image even if it is defined in the custom part
    with open("projects/testbuild/confs/commons.yml", "a") as f:
        f.write("""
  rabbit3:
    image: alpine:latest
    environment:
      ACTIVATE: 1
    """)

    exec_command(
        capfd,
        "pull --quiet rabbit3",
        "Base images pulled from docker hub",
    )

    # Now this should fail because pull does not include custom services
    exec_command(
        capfd,
        "start rabbit3",
        "Stack started",
    )
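
The build test above exercises the "no base image found" and "FROM rapydo/invalid" checks. A minimal sketch of the underlying idea follows, extracting the first FROM image of a Dockerfile; this is an assumption, and the controller may parse Dockerfiles differently.

from pathlib import Path
from typing import Optional


def get_base_image(dockerfile: Path) -> Optional[str]:
    # Return the image named in the first FROM instruction, or None when the
    # file is missing or contains no FROM at all ("no base image found")
    if not dockerfile.is_file():
        return None
    for raw_line in dockerfile.read_text().splitlines():
        line = raw_line.strip()
        if line.upper().startswith("FROM "):
            return line.split()[1]
    return None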
Esempio n. 24
def test_tuning(capfd: Capture, faker: Faker) -> None:

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="neo4j",
        services=["postgres"],
        frontend="no",
    )
    init_project(capfd)

    start_registry(capfd)

    exec_command(
        capfd,
        "tuning neo4j",
        f"image, execute {colors.RED}rapydo pull neo4j",
    )

    pull_images(capfd)

    # Tuning command with neo4j container OFF
    exec_command(
        capfd,
        "tuning neo4j",
        "Number of CPU(s): ",
        "Amount of RAM: ",
        "Suggested settings:",
        "Use 'dbms.memory.heap.max_size' as NEO4J_HEAP_SIZE",
        "Use 'dbms.memory.pagecache.size' as NEO4J_PAGECACHE_SIZE",
        "Memory settings recommendation from neo4j-admin memrec:",
        "Based on the above, the following memory settings are recommended:",
        "dbms.memory.heap.initial_size=",
        "dbms.memory.heap.max_size=",
        "dbms.memory.pagecache.size=",
        "Total size of lucene indexes in all databases:",
        "Total size of data and native indexes in all databases:",
    )

    start_project(capfd)

    service_verify(capfd, "neo4j")
    service_verify(capfd, "sqlalchemy")

    exec_command(
        capfd,
        "tuning backend",
        "Number of CPU(s): ",
        "Amount of RAM: ",
        "Suggested settings:",
        "GUNICORN_MAX_NUM_WORKERS",
    )

    # Tuning command with neo4j container ON
    exec_command(
        capfd,
        "tuning neo4j",
        "Number of CPU(s): ",
        "Amount of RAM: ",
        "Suggested settings:",
        "Use 'dbms.memory.heap.max_size' as NEO4J_HEAP_SIZE",
        "Use 'dbms.memory.pagecache.size' as NEO4J_PAGECACHE_SIZE",
        "Memory settings recommendation from neo4j-admin memrec:",
        "Based on the above, the following memory settings are recommended:",
        "dbms.memory.heap.initial_size=",
        "dbms.memory.heap.max_size=",
        "dbms.memory.pagecache.size=",
        "Total size of lucene indexes in all databases:",
        "Total size of data and native indexes in all databases:",
    )

    exec_command(
        capfd,
        "tuning postgres",
        "Number of CPU(s): ",
        "Amount of RAM: ",
        "Suggested settings:",
        "POSTGRES_SHARED_BUFFERS",
        "POSTGRES_EFFECTIVE_CACHE_SIZE",
        "POSTGRES_MAINTENANCE_WORK_MEM",
        "POSTGRES_MAX_WORKER_PROCESSES",
    )
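
tuning postgres prints suggestions derived from the detected CPU count and RAM. The exact formulas are not shown here; the sketch below uses the common rules of thumb (shared_buffers around 25% of RAM, effective_cache_size around 50%), which may differ from what the command actually computes. It is Linux-oriented because it reads memory via os.sysconf.

import os
from typing import Dict


def suggest_postgres_settings() -> Dict[str, str]:
    # Rough sketch: read total RAM via sysconf and apply the usual
    # 25% / 50% rules of thumb for shared buffers and cache size
    ram_bytes = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES")
    ram_mb = ram_bytes // (1024 * 1024)
    cpus = os.cpu_count() or 1
    return {
        "POSTGRES_SHARED_BUFFERS": f"{ram_mb // 4}MB",
        "POSTGRES_EFFECTIVE_CACHE_SIZE": f"{ram_mb // 2}MB",
        "POSTGRES_MAX_WORKER_PROCESSES": str(cpus),
    }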
Esempio n. 25
def test_password_mysql(capfd: Capture, faker: Faker) -> None:

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="mysql",
        frontend="no",
    )

    init_project(capfd, "-e API_AUTOSTART=1")
    start_registry(capfd)

    now = datetime.now()
    today = now.strftime("%Y-%m-%d")

    exec_command(
        capfd,
        "password mariadb --random",
        "Can't update mariadb because it is not running. Please start your stack",
    )

    exec_command(
        capfd,
        "password",
        f"mariadb    ALCHEMY_PASSWORD       {colors.RED}N/A",
        # f"mariadb    MYSQL_ROOT_PASSWORD    {colors.RED}N/A",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "sqlalchemy")

    backend_start_date = get_container_start_date(capfd, "backend")
    mariadb_start_date = get_container_start_date(capfd, "mariadb")
    mariadb_pass1 = get_variable_from_projectrc("ALCHEMY_PASSWORD")

    exec_command(
        capfd,
        "password mariadb --random",
        "mariadb was running, restarting services...",
        "The password of mariadb has been changed. ",
        "Please find the new password into your .projectrc file as "
        "ALCHEMY_PASSWORD variable",
    )

    mariadb_pass2 = get_variable_from_projectrc("ALCHEMY_PASSWORD")
    assert mariadb_pass1 != mariadb_pass2

    backend_start_date2 = get_container_start_date(capfd, "backend", wait=True)
    mariadb_start_date2 = get_container_start_date(capfd, "mariadb", wait=False)

    # Verify that both backend and mariadb are restarted
    assert backend_start_date2 != backend_start_date
    assert mariadb_start_date2 != mariadb_start_date

    service_verify(capfd, "sqlalchemy")

    exec_command(
        capfd,
        "password",
        f"mariadb    ALCHEMY_PASSWORD       {colors.GREEN}{today}",
        # f"mariadb    MYSQL_ROOT_PASSWORD    {colors.GREEN}{today}",
    )

    mypassword = faker.pystr()
    exec_command(
        capfd,
        f"password mariadb --password {mypassword}",
        "The password of mariadb has been changed. ",
    )
    assert mypassword == get_variable_from_projectrc("ALCHEMY_PASSWORD")

    exec_command(
        capfd,
        "password --show",
        mypassword,
    )

    if Configuration.swarm_mode:
        time.sleep(5)

    service_verify(capfd, "sqlalchemy")

    future = now + timedelta(days=PASSWORD_EXPIRATION + 1)
    expired = (now + timedelta(days=PASSWORD_EXPIRATION)).strftime("%Y-%m-%d")

    with freeze_time(future):
        exec_command(
            capfd,
            "password",
            f"mariadb    ALCHEMY_PASSWORD       {colors.RED}{today}",
        )

        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            f"ALCHEMY_PASSWORD is expired on {expired}",
        )

    # Cleanup the stack for the next test
    exec_command(capfd, "remove", "Stack removed")
Esempio n. 26
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "backup postgres")
    execute_outside(capfd, "restore postgres")

    backup_folder = BACKUP_DIR.joinpath("postgres")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup postgres",
        f"image, execute {colors.RED}rapydo pull postgres",
    )
    exec_command(
        capfd,
        "restore postgres",
        f"image, execute {colors.RED}rapydo pull postgres",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "sqlalchemy")

    # This will initialize postgres
    exec_command(capfd, "shell backend 'restapi init'")

    # Verify the initialization
    psql = "shell postgres 'psql -U sqluser -d SQL_API -c"
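    # The command string above is intentionally left open: each exec_command below
    # appends the quoted SQL statement and the closing ' to terminate the outer
    # shell quoting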
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user       | User",
    )

    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # A second backup is needed to test backup retention
    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    exec_command(
        capfd,
        "backup postgres --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup postgres --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Create an additional backup to test the deletion (now backups are 3)
    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup postgres --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.gz").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2, all the files above are ignored (not matching the pattern)
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup postgres",
        "The backup procedure requires postgres running, please start your stack",
    )

    exec_command(
        capfd,
        "restore postgres",
        "Please specify one of the following backup:",
        ".sql.gz",
    )
    exec_command(
        capfd,
        "restore postgres invalid",
        "Invalid backup file, data/backup/postgres/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore postgres",
            "No backup found, the following folder "
            "does not exist: data/backup/postgres",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore postgres",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/postgres")

        exec_command(
            capfd,
            "restore postgres",
            "No backup found, data/backup/postgres is empty",
        )

        open("data/backup/postgres/test.sql.gz", "a").close()

        exec_command(
            capfd,
            "restore postgres",
            "Please specify one of the following backup:",
            "test.sql.gz",
        )

        os.remove("data/backup/postgres/test.sql.gz")

    files = os.listdir("data/backup/postgres")
    files = [f for f in files if f.endswith(".sql.gz")]
    files.sort()
    postgres_dump_file = files[-1]

    # Postgres restore not allowed if container is not running
    exec_command(
        capfd,
        f"restore postgres {postgres_dump_file}",
        "The restore procedure requires postgres running, please start your stack",
    )

    exec_command(capfd, "start", "Stack started")

    # Here we test the restore procedure:
    # 1) verify some data in the database
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user       | User",
    )

    # 2) Modify the data
    exec_command(
        capfd,
        f'{psql} "update role SET description=name"\'',
    )
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user       | normal_user",
    )
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore postgres {postgres_dump_file}",
        "Starting restore on postgres...",
        "CREATE DATABASE",
        "ALTER DATABASE",
        f"Restore from data/backup/postgres/{postgres_dump_file} completed",
    )

    # 4) verify that data matches point 1 again (restore completed)
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user       | User",
    )
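
Several restore tests wrap their assertions in TemporaryRemovePath to simulate a missing backup folder. The context manager below is a sketch of what such a helper could look like (an assumption, not the suite's implementation): it parks the path on enter and restores it on exit, discarding anything re-created in the meantime.

import shutil
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator


@contextmanager
def temporary_remove_path(path: Path) -> Iterator[None]:
    # Move the path aside so the code under test sees it as missing,
    # then put it back, dropping anything re-created at the same location
    parked = path.with_name(path.name + ".parked")
    path.rename(parked)
    try:
        yield
    finally:
        if path.is_dir():
            shutil.rmtree(path)
        elif path.exists():
            path.unlink()
        parked.rename(path)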
Esempio n. 27
def test_base(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "reload")

    project_name = random_project_name(faker)

    create_project(
        capfd=capfd,
        name=project_name,
        auth="no",
        frontend="no",
        services=["fail2ban"],
    )
    init_project(capfd)

    exec_command(capfd, "reload", "No service reloaded")
    exec_command(capfd, "reload backend", "No service reloaded")
    exec_command(capfd, "reload invalid", "No such service: invalid")
    exec_command(capfd, "reload backend invalid", "No such service: invalid")

    start_registry(capfd)
    pull_images(capfd)

    start_project(capfd)

    exec_command(capfd, "reload backend", "Reloading Flask...")

    if Configuration.swarm_mode:
        service = "backend"

        exec_command(
            capfd,
            "start backend",
            "Stack started",
        )

        exec_command(
            capfd,
            "scale backend=2 --wait",
            f"{project_name}_backend scaled to 2",
            "Service converged",
        )
    else:

        service = "fail2ban"
        exec_command(
            capfd,
            "scale fail2ban=2",
            "Scaling services: fail2ban=2...",
            "Services scaled: fail2ban=2",
        )

    time.sleep(4)

    docker = Docker()
    container1 = docker.get_container(service, slot=1)
    container2 = docker.get_container(service, slot=2)
    assert container1 is not None
    assert container2 is not None
    assert container1 != container2

    exec_command(
        capfd,
        f"reload {service}",
        f"Executing command on {container1[0]}",
        f"Executing command on {container2[0]}",
    )

    exec_command(capfd, "shell backend -u root 'rm /usr/local/bin/reload'")

    exec_command(
        capfd, "reload backend", "Service backend does not support the reload command"
    )

    exec_command(capfd, "remove", "Stack removed")
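
The last assertions in this test rely on the presence of /usr/local/bin/reload inside the container: once the script is removed, the CLI reports that the service does not support the reload command. A plausible check for that condition via python-on-whales is sketched below; it is an assumption, not the controller's code.

from python_on_whales import docker
from python_on_whales.exceptions import DockerException


def supports_reload(container_name: str) -> bool:
    # A service supports `reload` only if the reload script is present
    # (and executable) inside its running container
    try:
        docker.container.execute(
            container_name, ["test", "-x", "/usr/local/bin/reload"]
        )
        return True
    except DockerException:
        return False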
Esempio n. 28
def test_swarm_multi_host(capfd: Capture) -> None:

    if not Configuration.swarm_mode:
        return None

    rand = random.SystemRandom()

    auth = rand.choice((
        "postgres",
        "mysql",
        "neo4j",
    ))

    create_project(
        capfd=capfd,
        name="swarm",
        auth=auth,
        frontend="no",
    )

    MANAGER_ADDRESS = None
    for node in docker.node.list():
        if node.spec.role.lower() == "manager":
            MANAGER_ADDRESS = node.status.addr

    assert MANAGER_ADDRESS is not None
    # IP=$(echo $TOKEN | awk {'print $6'} | awk -F: {'print $1'})
    # REGISTRY_HOST="${IP}:5000"
    # NFS_HOST="${IP}

    exec_command(
        capfd,
        "-e HEALTHCHECK_INTERVAL=1s init",
        "docker compose is installed",
        "NFS Server is enabled",
        # already initialized before the test, in the workflow yml
        "Swarm is already initialized",
        "Project initialized",
    )

    start_registry(capfd)
    pull_images(capfd)

    exec_command(
        capfd,
        "start backend",
        "A volume path is missing and can't be automatically created: ",
        f"Suggested command: {colors.RED}sudo mkdir -p /volumes/ssl_certs",
        "&& sudo chown ",
    )

    exec_command(
        capfd,
        "-e HEALTHCHECK_INTERVAL=1s -e NFS_EXPORTS_SSL_CERTS=/tmp/ssl_certs init -f",
    )
    # Deploy a sub-stack
    exec_command(
        capfd,
        "start backend",
        "A volume path was missing and was automatically created: /tmp/ssl_certs",
        "Stack started",
    )

    exec_command(
        capfd,
        "status",
        "Manager",
        # Still unable to add workers because GA instances lack nested virtualization
        # See details in pytests.yml (VT-x is not available)
        # "Worker",
        "Ready+Active",
        "swarm_backend",
        " [1]",
        # "running",
        # This is because NFS is not installed/configured for this test...
        # to be completed
        "error while mounting volume",
    )
Esempio n. 29
def test_password_backend(capfd: Capture, faker: Faker) -> None:

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="postgres",
        frontend="no",
    )

    init_project(capfd, "-e API_AUTOSTART=1")

    # Let's simplify this task by removing the task history.
    # Otherwise wait_until very often fails due to the logs of previous tasks
    if Configuration.swarm_mode:
        docker.swarm.update(task_history_limit=0)

    start_registry(capfd)

    now = datetime.now()
    today = now.strftime("%Y-%m-%d")

    exec_command(
        capfd,
        "password backend --random",
        "Can't update backend because it is not running. Please start your stack",
    )

    exec_command(
        capfd,
        "password",
        f"backend    AUTH_DEFAULT_PASSWORD  {colors.RED}N/A",
    )

    pull_images(capfd)
    start_project(capfd)

    wait_until(capfd, "logs backend --tail 10", "Boot completed")
    # In dev mode Flask loads the app twice... A "Boot completed" only states that
    # the app has been loaded at least once; the second load will surely succeed.
    # But we can't tell whether Flask is really ready or still loading a second time.
    # Added a sleep to wait for the eventual second load
    time.sleep(2)

    exec_command(capfd, "logs backend --tail 10")

    r = requests.post(
        "http://127.0.0.1:8080/auth/login",
        json={
            "username": get_variable_from_projectrc("AUTH_DEFAULT_USERNAME"),
            "password": get_variable_from_projectrc("AUTH_DEFAULT_PASSWORD"),
        },
    )
    exec_command(capfd, "logs backend --tail 10")
    assert r.status_code == 200

    backend_start_date = get_container_start_date(capfd, "backend")
    backend_pass1 = get_variable_from_projectrc("AUTH_DEFAULT_PASSWORD")

    exec_command(
        capfd,
        "password backend --random",
        "backend was running, restarting services...",
        "The password of backend has been changed. ",
        "Please find the new password into your .projectrc file as "
        "AUTH_DEFAULT_PASSWORD variable",
    )

    backend_pass2 = get_variable_from_projectrc("AUTH_DEFAULT_PASSWORD")
    assert backend_pass1 != backend_pass2

    backend_start_date2 = get_container_start_date(capfd, "backend", wait=True)

    # Verify that backend is restarted
    assert backend_start_date2 != backend_start_date

    # This is needed to wait for the service rolling update
    if Configuration.swarm_mode:
        time.sleep(5)

    wait_until(capfd, "logs backend --tail 10", "Boot completed")
    # In dev mode Flask loads the app twice... A "Boot completed" only states that
    # the app has been loaded at least once; the second load will surely succeed.
    # But we can't tell whether Flask is really ready or still loading a second time.
    # Added a sleep to wait for the eventual second load
    time.sleep(2)

    r = requests.post(
        "http://127.0.0.1:8080/auth/login",
        json={
            "username": get_variable_from_projectrc("AUTH_DEFAULT_USERNAME"),
            "password": get_variable_from_projectrc("AUTH_DEFAULT_PASSWORD"),
        },
    )
    exec_command(capfd, "logs backend --tail 10")
    assert r.status_code == 200

    exec_command(
        capfd,
        "password",
        f"backend    AUTH_DEFAULT_PASSWORD  {colors.GREEN}{today}",
    )

    mypassword = faker.pystr()
    exec_command(
        capfd,
        f"password backend --password {mypassword}",
        "The password of backend has been changed. ",
    )
    assert mypassword == get_variable_from_projectrc("AUTH_DEFAULT_PASSWORD")

    exec_command(
        capfd,
        "password --show",
        mypassword,
    )

    # This is needed to wait for the service rolling update
    if Configuration.swarm_mode:
        time.sleep(5)

    wait_until(capfd, "logs backend --tail 10", "Boot completed")
    # in dev mode Flask loads the app two times... A "boot completed" only states that
    # the app is loaded at least once, and the second time will success for sure
    # But can't say if now flask is really ready or still loading the second time
    # Added a sleep to wait for the eventual second load
    time.sleep(2)

    r = requests.post(
        "http://127.0.0.1:8080/auth/login",
        json={
            "username": get_variable_from_projectrc("AUTH_DEFAULT_USERNAME"),
            "password": get_variable_from_projectrc("AUTH_DEFAULT_PASSWORD"),
        },
    )
    exec_command(capfd, "logs backend --tail 10")
    assert r.status_code == 200

    future = now + timedelta(days=PASSWORD_EXPIRATION + 1)
    expired = (now + timedelta(days=PASSWORD_EXPIRATION)).strftime("%Y-%m-%d")

    with freeze_time(future):
        exec_command(
            capfd,
            "password",
            f"backend    AUTH_DEFAULT_PASSWORD  {colors.RED}{today}",
        )

        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            f"AUTH_DEFAULT_PASSWORD is expired on {expired}",
        )

    # Cleanup the stack for the next test
    exec_command(capfd, "remove", "Stack removed")
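
test_password_backend repeatedly calls wait_until to poll the backend logs for "Boot completed". The helper is not shown here; below is a simplified polling loop in the same spirit, with an assumed signature that differs from the one used above (which also takes capfd and a CLI command).

import time
from typing import Callable


def wait_until(
    condition: Callable[[], bool], timeout: float = 60.0, interval: float = 2.0
) -> bool:
    # Re-evaluate `condition` until it is satisfied or the timeout expires
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if condition():
            return True
        time.sleep(interval)
    return False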
Esempio n. 30
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "backup neo4j")
    execute_outside(capfd, "restore neo4j")

    backup_folder = BACKUP_DIR.joinpath("neo4j")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="neo4j",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup neo4j",
        f"image, execute {colors.RED}rapydo pull neo4j",
    )
    exec_command(
        capfd,
        "restore neo4j",
        f"image, execute {colors.RED}rapydo pull neo4j",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "neo4j")

    # This will initialize neo4j
    exec_command(capfd, "shell backend 'restapi init'")

    time.sleep(25)
    # Just some extra delay: restapi init alone is not always enough...
    if Configuration.swarm_mode:
        time.sleep(30)

    # Verify the initialization
    cypher = "shell neo4j 'bin/cypher-shell"
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "User"',
    )

    # Backup command
    exec_command(
        capfd,
        "backup neo4j",
        "Neo4j is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )
    exec_command(
        capfd,
        "backup neo4j --force --restart backend --restart rabbit",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )
    # This is to verify that --force restarted neo4j
    exec_command(
        capfd,
        "backup neo4j",
        "Neo4j is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup neo4j",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup neo4j --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup neo4j --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    exec_command(
        capfd,
        "backup neo4j --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup neo4j --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Create an additional backup to test the deletion (now backups are 3)
    exec_command(
        capfd,
        "backup neo4j",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup neo4j --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)
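    # Only file names matching the <yyyy>_<mm>_<dd>-<HH>_<MM>_<SS> pattern with
    # plausible date/time values should be counted: all the files created above
    # are expected to be skipped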

    exec_command(
        capfd,
        "backup neo4j --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2: all the files above are ignored because they do not match the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    exec_command(capfd, "start", "Stack started")

    # Just some extra delay, neo4j is a slow starter
    time.sleep(25)

    # Restore command
    exec_command(capfd, "restore neo4j",
                 "Please specify one of the following backup:", ".dump")

    exec_command(
        capfd,
        "restore neo4j invalid",
        "Invalid backup file, data/backup/neo4j/invalid does not exist",
    )

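    # TemporaryRemovePath presumably moves the folder away for the duration of
    # the block and restores it on exit, simulating a missing backup directory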
    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore neo4j",
            "No backup found, the following folder "
            "does not exist: data/backup/neo4j",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore neo4j",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/neo4j")

        exec_command(
            capfd,
            "restore neo4j",
            "No backup found, data/backup/neo4j is empty",
        )

        open("data/backup/neo4j/test.gz", "a").close()

        exec_command(
            capfd,
            "restore neo4j",
            "No backup found, data/backup/neo4j is empty",
        )

        open("data/backup/neo4j/test.dump", "a").close()

        exec_command(
            capfd,
            "restore neo4j",
            "Please specify one of the following backup:",
            "test.dump",
        )

        os.remove("data/backup/neo4j/test.gz")
        os.remove("data/backup/neo4j/test.dump")

    # Test restore on neo4j (requires neo4j to be down)
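    # Pick the most recent dump: the timestamp-based file names sort chronologically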
    files = os.listdir("data/backup/neo4j")
    files = [f for f in files if f.endswith(".dump")]
    files.sort()
    neo4j_dump_file = files[-1]

    time.sleep(25)

    # Here we test the restore procedure:
    # 1) verify some data in the database
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "User"',
    )

    # 2) Modify the data
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) SET r.description = r.name"\'',
    )
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "normal_user"',
    )
    exec_command(capfd, "remove")

    # 3) restore the dump
    exec_command(
        capfd,
        f"restore neo4j {neo4j_dump_file}",
        "Starting restore on neo4j...",
        "Done: ",
        f"Restore from data/backup/neo4j/{neo4j_dump_file} completed",
    )

    exec_command(capfd, "start", "Stack started")

    exec_command(
        capfd,
        f"restore neo4j {neo4j_dump_file}",
        "Neo4j is running and the restore will temporary stop it.",
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        f"restore neo4j {neo4j_dump_file} --force --restart backend",
        "Starting restore on neo4j...",
        "Done: ",
        f"Restore from data/backup/neo4j/{neo4j_dump_file} completed",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )

    # Wait for neo4j to completely start up
    service_verify(capfd, "neo4j")

    # 4) verify that data match point 1 again (restore completed)
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "User"',
    )