Example #1
def test_password_mysql(capfd: Capture, faker: Faker) -> None:

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="mysql",
        frontend="no",
    )

    init_project(capfd, "-e API_AUTOSTART=1")
    start_registry(capfd)

    now = datetime.now()
    today = now.strftime("%Y-%m-%d")

    exec_command(
        capfd,
        "password mariadb --random",
        "Can't update mariadb because it is not running. Please start your stack",
    )

    exec_command(
        capfd,
        "password",
        f"mariadb    ALCHEMY_PASSWORD       {colors.RED}N/A",
        # f"mariadb    MYSQL_ROOT_PASSWORD    {colors.RED}N/A",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "sqlalchemy")

    backend_start_date = get_container_start_date(capfd, "backend")
    mariadb_start_date = get_container_start_date(capfd, "mariadb")
    mariadb_pass1 = get_variable_from_projectrc("ALCHEMY_PASSWORD")

    exec_command(
        capfd,
        "password mariadb --random",
        "mariadb was running, restarting services...",
        "The password of mariadb has been changed. ",
        "Please find the new password into your .projectrc file as "
        "ALCHEMY_PASSWORD variable",
    )

    mariadb_pass2 = get_variable_from_projectrc("ALCHEMY_PASSWORD")
    assert mariadb_pass1 != mariadb_pass2

    backend_start_date2 = get_container_start_date(capfd, "backend", wait=True)
    mariadb_start_date2 = get_container_start_date(capfd, "mariadb", wait=False)
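    # wait=True presumably makes the helper block until the backend container
    # is back up after the restart; by then mariadb has restarted too, so its
    # start date can be read without waiting (hence wait=False)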

    # Verify that both backend and mariadb are restarted
    assert backend_start_date2 != backend_start_date
    assert mariadb_start_date2 != mariadb_start_date

    service_verify(capfd, "sqlalchemy")

    exec_command(
        capfd,
        "password",
        f"mariadb    ALCHEMY_PASSWORD       {colors.GREEN}{today}",
        # f"mariadb    MYSQL_ROOT_PASSWORD    {colors.GREEN}{today}",
    )

    mypassword = faker.pystr()
    exec_command(
        capfd,
        f"password mariadb --password {mypassword}",
        "The password of mariadb has been changed. ",
    )
    assert mypassword == get_variable_from_projectrc("ALCHEMY_PASSWORD")

    exec_command(
        capfd,
        "password --show",
        mypassword,
    )

    if Configuration.swarm_mode:
        time.sleep(5)

    service_verify(capfd, "sqlalchemy")

    future = now + timedelta(days=PASSWORD_EXPIRATION + 1)
    expired = (now + timedelta(days=PASSWORD_EXPIRATION)).strftime("%Y-%m-%d")
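    # e.g. with PASSWORD_EXPIRATION = 90 and now = 2023-01-01, `future` falls
    # one day past the deadline and `expired` renders as 2023-04-01 (values
    # assumed for illustration; PASSWORD_EXPIRATION is imported by the tests)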

    with freeze_time(future):
        exec_command(
            capfd,
            "password",
            f"mariadb    ALCHEMY_PASSWORD       {colors.RED}{today}",
        )

        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            f"ALCHEMY_PASSWORD is expired on {expired}",
        )

    # Cleanup the stack for the next test
    exec_command(capfd, "remove", "Stack removed")
Example #2
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "backup rabbit")
    execute_outside(capfd, "restore rabbit")

    backup_folder = BACKUP_DIR.joinpath("rabbit")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
        services=["rabbit"],
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup rabbit",
        f"image, execute {colors.RED}rapydo pull rabbit",
    )
    exec_command(
        capfd,
        "restore rabbit",
        f"image, execute {colors.RED}rapydo pull rabbit",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "rabbitmq")

    # Just some extra delay, rabbit is a slow starter
    time.sleep(5)

    # NOTE: q = rabbitmq.__name__ is just a fixed name that can be used to test
    # the queue without the need to introduce further nested " or '
    query_queue = "shell backend \"/usr/bin/python3 -c 'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; r = rabbitmq.get_instance();print(q, r.queue_exists(q));'\""
    create_queue = "shell backend \"/usr/bin/python3 -c 'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; r = rabbitmq.get_instance(); r.create_queue(q);'\""
    delete_queue = "shell backend \"/usr/bin/python3 -c 'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; r = rabbitmq.get_instance(); r.delete_queue(q);'\""
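    # For reference, the quoted one-liner in query_queue runs this inside the
    # backend container:
    #   from restapi.connectors import rabbitmq
    #   q = rabbitmq.__name__            # fixed, predictable queue name
    #   r = rabbitmq.get_instance()
    #   print(q, r.queue_exists(q))     # -> "restapi.connectors.rabbitmq False"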

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq False")

    exec_command(
        capfd,
        create_queue,
    )

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq True")

    # Backup command
    exec_command(
        capfd,
        "backup rabbit",
        "RabbitMQ is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )
    exec_command(
        capfd,
        "backup rabbit --force --restart backend",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )
    # This is to verify that --force restarted rabbit
    exec_command(
        capfd,
        "backup rabbit",
        "RabbitMQ is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup rabbit",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    exec_command(
        capfd,
        "backup rabbit --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup rabbit --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Create an additional backup to test the deletion (now backups are 3)
    exec_command(
        capfd,
        "backup rabbit",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup rabbit --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2: all files above are ignored as they do not match the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    exec_command(capfd, "start backend rabbit")

    # Just some extra delay, rabbit is a slow starter
    if Configuration.swarm_mode:
        time.sleep(20)
    else:
        time.sleep(10)

    exec_command(
        capfd,
        delete_queue,
    )

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq False")

    # Restore command
    exec_command(
        capfd,
        "restore rabbit",
        "Please specify one of the following backup:",
        ".tar.gz",
    )

    exec_command(
        capfd,
        "restore rabbit invalid",
        "Invalid backup file, data/backup/rabbit/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore rabbit",
            "No backup found, the following folder "
            "does not exist: data/backup/rabbit",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore rabbit",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/rabbit")

        exec_command(
            capfd,
            "restore rabbit",
            "No backup found, data/backup/rabbit is empty",
        )

        open("data/backup/rabbit/test.gz", "a").close()

        exec_command(
            capfd,
            "restore rabbit",
            "No backup found, data/backup/rabbit is empty",
        )

        open("data/backup/rabbit/test.tar.gz", "a").close()

        exec_command(
            capfd,
            "restore rabbit",
            "Please specify one of the following backup:",
            "test.tar.gz",
        )

        os.remove("data/backup/rabbit/test.gz")
        os.remove("data/backup/rabbit/test.tar.gz")

    # Test restore on rabbit (requires rabbit to be down)
    files = os.listdir("data/backup/rabbit")
    files = [f for f in files if f.endswith(".tar.gz")]
    files.sort()
    rabbit_dump_file = files[-1]
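    # Zero-padded timestamps (YYYY_MM_DD-HH_MM_SS) sort lexicographically in
    # chronological order, so files[-1] is the most recent dump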

    exec_command(capfd, "remove")
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file}",
        "Starting restore on rabbit...",
        f"Restore from data/backup/rabbit/{rabbit_dump_file} completed",
    )

    exec_command(capfd, "start", "Stack started")
    # 4) verify data matches point 1 again (restore completed)
    # postponed because rabbit needs time to start...

    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file}",
        "RabbitMQ is running and the restore will temporary stop it.",
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file} --force --restart backend",
        "Starting restore on rabbit...",
        f"Restore from data/backup/rabbit/{rabbit_dump_file} completed",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )

    # Wait for rabbit to completely start up
    service_verify(capfd, "rabbitmq")

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq True")
Example #3
def test_password_redis(capfd: Capture, faker: Faker) -> None:

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="no",
        frontend="no",
        services=["redis"],
    )

    init_project(capfd, "-e API_AUTOSTART=1")
    start_registry(capfd)

    now = datetime.now()
    today = now.strftime("%Y-%m-%d")

    exec_command(
        capfd,
        "password",
        f"redis      REDIS_PASSWORD         {colors.RED}N/A",
    )

    redis_pass1 = get_variable_from_projectrc("REDIS_PASSWORD")
    exec_command(
        capfd,
        "password redis --random",
        "redis was not running, restart is not needed",
        "The password of redis has been changed. ",
        "Please find the new password into your .projectrc file as "
        "REDIS_PASSWORD variable",
    )
    redis_pass2 = get_variable_from_projectrc("REDIS_PASSWORD")
    assert redis_pass1 != redis_pass2

    exec_command(
        capfd,
        "password",
        f"redis      REDIS_PASSWORD         {colors.GREEN}{today}",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "redis")

    backend_start_date = get_container_start_date(capfd, "backend")
    redis_start_date = get_container_start_date(capfd, "redis")

    exec_command(
        capfd,
        "password redis --random",
        "redis was running, restarting services...",
        "The password of redis has been changed. ",
        "Please find the new password into your .projectrc file as "
        "REDIS_PASSWORD variable",
    )

    redis_pass3 = get_variable_from_projectrc("REDIS_PASSWORD")
    assert redis_pass2 != redis_pass3

    backend_start_date2 = get_container_start_date(capfd, "backend", wait=True)
    redis_start_date2 = get_container_start_date(capfd, "redis", wait=False)

    # Verify that both backend and redis are restarted
    assert backend_start_date2 != backend_start_date
    assert redis_start_date2 != redis_start_date

    service_verify(capfd, "redis")

    exec_command(
        capfd,
        "password",
        f"redis      REDIS_PASSWORD         {colors.GREEN}{today}",
    )

    mypassword = faker.pystr()
    exec_command(
        capfd,
        f"password redis --password {mypassword}",
        "The password of redis has been changed. ",
    )
    assert mypassword == get_variable_from_projectrc("REDIS_PASSWORD")

    exec_command(
        capfd,
        "password --show",
        mypassword,
    )

    if Configuration.swarm_mode:
        time.sleep(5)

    service_verify(capfd, "redis")

    future = now + timedelta(days=PASSWORD_EXPIRATION + 1)
    expired = (now + timedelta(days=PASSWORD_EXPIRATION)).strftime("%Y-%m-%d")

    with freeze_time(future):
        exec_command(
            capfd,
            "password",
            f"redis      REDIS_PASSWORD         {colors.RED}{today}",
        )

        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            f"REDIS_PASSWORD is expired on {expired}",
        )

    # Cleanup the stack for the next test
    exec_command(capfd, "remove", "Stack removed")
Example #4
def test_tuning(capfd: Capture, faker: Faker) -> None:

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="neo4j",
        services=["postgres"],
        frontend="no",
    )
    init_project(capfd)

    start_registry(capfd)

    exec_command(
        capfd,
        "tuning neo4j",
        f"image, execute {colors.RED}rapydo pull neo4j",
    )

    pull_images(capfd)

    # Tuning command with neo4j container OFF
    exec_command(
        capfd,
        "tuning neo4j",
        "Number of CPU(s): ",
        "Amount of RAM: ",
        "Suggested settings:",
        "Use 'dbms.memory.heap.max_size' as NEO4J_HEAP_SIZE",
        "Use 'dbms.memory.pagecache.size' as NEO4J_PAGECACHE_SIZE",
        "Memory settings recommendation from neo4j-admin memrec:",
        "Based on the above, the following memory settings are recommended:",
        "dbms.memory.heap.initial_size=",
        "dbms.memory.heap.max_size=",
        "dbms.memory.pagecache.size=",
        "Total size of lucene indexes in all databases:",
        "Total size of data and native indexes in all databases:",
    )

    start_project(capfd)

    service_verify(capfd, "neo4j")
    service_verify(capfd, "sqlalchemy")

    exec_command(
        capfd,
        "tuning backend",
        "Number of CPU(s): ",
        "Amount of RAM: ",
        "Suggested settings:",
        "GUNICORN_MAX_NUM_WORKERS",
    )

    # Tuning command with neo4j container ON
    exec_command(
        capfd,
        "tuning neo4j",
        "Number of CPU(s): ",
        "Amount of RAM: ",
        "Suggested settings:",
        "Use 'dbms.memory.heap.max_size' as NEO4J_HEAP_SIZE",
        "Use 'dbms.memory.pagecache.size' as NEO4J_PAGECACHE_SIZE",
        "Memory settings recommendation from neo4j-admin memrec:",
        "Based on the above, the following memory settings are recommended:",
        "dbms.memory.heap.initial_size=",
        "dbms.memory.heap.max_size=",
        "dbms.memory.pagecache.size=",
        "Total size of lucene indexes in all databases:",
        "Total size of data and native indexes in all databases:",
    )

    exec_command(
        capfd,
        "tuning postgres",
        "Number of CPU(s): ",
        "Amount of RAM: ",
        "Suggested settings:",
        "POSTGRES_SHARED_BUFFERS",
        "POSTGRES_EFFECTIVE_CACHE_SIZE",
        "POSTGRES_MAINTENANCE_WORK_MEM",
        "POSTGRES_MAX_WORKER_PROCESSES",
    )
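    # For context, common Postgres sizing heuristics (assumed here for
    # illustration, not necessarily the formulas used by `tuning postgres`):
    #   POSTGRES_SHARED_BUFFERS       ~ 25% of total RAM
    #   POSTGRES_EFFECTIVE_CACHE_SIZE ~ 50-75% of total RAM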
Example #5
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "backup postgres")
    execute_outside(capfd, "restore postgres")

    backup_folder = BACKUP_DIR.joinpath("postgres")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup postgres",
        f"image, execute {colors.RED}rapydo pull postgres",
    )
    exec_command(
        capfd,
        "restore postgres",
        f"image, execute {colors.RED}rapydo pull postgres",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "sqlalchemy")

    # This will initialize postgres
    exec_command(capfd, "shell backend 'restapi init'")

    # Verify the initialization
    psql = "shell postgres 'psql -U sqluser -d SQL_API -c"
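    # Note the split quoting: `psql` opens a single-quoted shell argument that
    # each caller closes with a trailing \' after the double-quoted SQL, e.g.:
    #   shell postgres 'psql -U sqluser -d SQL_API -c "select 1"'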
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user       | User",
    )

    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # A second backup is needed to test backup retention
    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    exec_command(
        capfd,
        "backup postgres --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup postgres --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Create an additional backup to test the deletion (now backups are 3)
    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup postgres --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.gz").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2: all files above are ignored as they do not match the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup postgres",
        "The backup procedure requires postgres running, please start your stack",
    )

    exec_command(
        capfd,
        "restore postgres",
        "Please specify one of the following backup:",
        ".sql.gz",
    )
    exec_command(
        capfd,
        "restore postgres invalid",
        "Invalid backup file, data/backup/postgres/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore postgres",
            "No backup found, the following folder "
            "does not exist: data/backup/postgres",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore postgres",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/postgres")

        exec_command(
            capfd,
            "restore postgres",
            "No backup found, data/backup/postgres is empty",
        )

        open("data/backup/postgres/test.sql.gz", "a").close()

        exec_command(
            capfd,
            "restore postgres",
            "Please specify one of the following backup:",
            "test.sql.gz",
        )

        os.remove("data/backup/postgres/test.sql.gz")

    files = os.listdir("data/backup/postgres")
    files = [f for f in files if f.endswith(".sql.gz")]
    files.sort()
    postgres_dump_file = files[-1]

    # Postgres restore is not allowed if the container is not running
    exec_command(
        capfd,
        f"restore postgres {postgres_dump_file}",
        "The restore procedure requires postgres running, please start your stack",
    )

    exec_command(capfd, "start", "Stack started")

    # Here we test the restore procedure:
    # 1) verify some data in the database
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user       | User",
    )

    # 2) Modify the data
    exec_command(
        capfd,
        f'{psql} "update role SET description=name"\'',
    )
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user       | normal_user",
    )
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore postgres {postgres_dump_file}",
        "Starting restore on postgres...",
        "CREATE DATABASE",
        "ALTER DATABASE",
        f"Restore from data/backup/postgres/{postgres_dump_file} completed",
    )

    # 4) verify data matches point 1 again (restore completed)
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user       | User",
    )
Example #6
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "ssl")

    project = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project,
        auth="neo4j",
        frontend="no",
        services=["rabbit"],
    )
    pconf = f"projects/{project}/project_configuration.yaml"
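    # pconf is simply a file that is guaranteed to exist; it is reused below
    # as a stand-in --chain-file/--key-file to exercise the ssl validations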

    exec_command(
        capfd,
        "--prod init -f",
        "Created default .projectrc file",
        "Project initialized",
    )

    start_registry(capfd)

    exec_command(
        capfd,
        "ssl",
        f"image, execute {colors.RED}rapydo pull proxy",
    )

    exec_command(
        capfd,
        "--prod pull --quiet",
        "Base images pulled from docker hub",
    )

    exec_command(
        capfd,
        "ssl",
        "The proxy is not running, start your stack or try with "
        f"{colors.RED}rapydo ssl --volatile",
    )

    # Before creating SSL certificates rabbit and neo4j should not be able to start
    exec_command(
        capfd,
        "run --debug rabbit",
        "SSL mandatory file not found: /ssl/real/fullchain1.pem",
    )

    exec_command(
        capfd,
        "run --debug neo4j",
        "SSL mandatory file not found: /ssl/real/fullchain1.pem",
    )

    exec_command(
        capfd,
        "ssl --volatile",
        "Creating a self signed SSL certificate",
        "Self signed SSL certificate successfully created",
        # Just to verify that the default does not change
        "Generating DH parameters, 1024 bit long safe prime, generator 2",
    )

    # Start the stack to verify certificate creation while services are running
    exec_command(
        capfd,
        "--prod start",
    )

    # Needed because the next command requires rabbit already started
    # Otherwise will fail with:
    # Error: unable to perform an operation on node 'rabbit@rabbit'.
    # Please see diagnostics information and suggestions below.
    if Configuration.swarm_mode:
        # 60!? :| It still fails after raising to 30... Let's double it!!
        # 90!? :| It still fails after raising to 60!!
        time.sleep(90)
        # DEBUG CODE
        exec_command(capfd, "logs rabbit")
    else:
        time.sleep(5)

    service_verify(capfd, "rabbitmq")

    exec_command(
        capfd,
        "ssl --no-tty",
        "--no-tty option is deprecated, you can stop using it",
        "Creating a self signed SSL certificate",
        "Self signed SSL certificate successfully created",
        "Neo4j is running, a full restart is needed. NOT IMPLEMENTED YET.",
        "RabbitMQ is running, executing command to refresh the certificate",
        "New certificate successfully enabled",
    )

    exec_command(
        capfd,
        "ssl --chain-file /file",
        "Invalid chain file (you provided /file)",
    )
    exec_command(
        capfd,
        "ssl --key-file /file",
        "Invalid chain file (you provided none)",
    )

    exec_command(
        capfd,
        f"ssl --chain-file {pconf}",
        "Invalid key file (you provided none)",
    )
    exec_command(
        capfd,
        f"ssl --chain-file {pconf} --key-file /file",
        "Invalid key file (you provided /file)",
    )
    exec_command(
        capfd,
        f"ssl --chain-file {pconf} --key-file {pconf}",
        "Unable to automatically perform the requested operation",
        "You can execute the following commands by your-self:",
    )
Example #7
def test_password_rabbit(capfd: Capture, faker: Faker) -> None:

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="no",
        frontend="no",
        services=["rabbit"],
    )

    init_project(capfd, "-e API_AUTOSTART=1")
    start_registry(capfd)

    now = datetime.now()
    today = now.strftime("%Y-%m-%d")

    exec_command(
        capfd,
        "password rabbit --random",
        "Can't update rabbit because it is not running. Please start your stack",
    )

    exec_command(
        capfd,
        "password",
        f"rabbit     RABBITMQ_PASSWORD      {colors.RED}N/A",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "rabbitmq")

    #  ############## RABBIT #####################

    backend_start_date = get_container_start_date(capfd, "backend")
    rabbit_start_date = get_container_start_date(capfd, "rabbit")
    rabbit_pass1 = get_variable_from_projectrc("RABBITMQ_PASSWORD")

    exec_command(
        capfd,
        "password rabbit --random",
        "rabbit was running, restarting services...",
        "The password of rabbit has been changed. ",
        "Please find the new password into your .projectrc file as "
        "RABBITMQ_PASSWORD variable",
    )

    rabbit_pass2 = get_variable_from_projectrc("RABBITMQ_PASSWORD")
    assert rabbit_pass1 != rabbit_pass2

    backend_start_date2 = get_container_start_date(capfd, "backend", wait=True)
    rabbit_start_date2 = get_container_start_date(capfd, "rabbit", wait=False)

    # Verify that both backend and rabbit are restarted
    assert backend_start_date2 != backend_start_date
    assert rabbit_start_date2 != rabbit_start_date

    service_verify(capfd, "rabbitmq")

    exec_command(
        capfd,
        "password",
        f"rabbit     RABBITMQ_PASSWORD      {colors.GREEN}{today}",
    )

    # Needed to prevent random:
    # failed to update service xyz_rabbit:
    # Error response from daemon:
    # rpc error: code = Unknown desc = update out of sequence
    if Configuration.swarm_mode:
        time.sleep(3)

    mypassword = faker.pystr()
    exec_command(
        capfd,
        f"password rabbit --password {mypassword}",
        "The password of rabbit has been changed. ",
    )
    assert mypassword == get_variable_from_projectrc("RABBITMQ_PASSWORD")

    exec_command(
        capfd,
        "password --show",
        mypassword,
    )

    if Configuration.swarm_mode:
        time.sleep(5)

    service_verify(capfd, "rabbitmq")

    future = now + timedelta(days=PASSWORD_EXPIRATION + 1)
    expired = (now + timedelta(days=PASSWORD_EXPIRATION)).strftime("%Y-%m-%d")

    with freeze_time(future):
        exec_command(
            capfd,
            "password",
            f"rabbit     RABBITMQ_PASSWORD      {colors.RED}{today}",
        )

        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            f"RABBITMQ_PASSWORD is expired on {expired}",
        )

    # Cleanup the stack for the next test
    exec_command(capfd, "remove", "Stack removed")
Example #8
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "backup redis")
    execute_outside(capfd, "restore redis")

    backup_folder = BACKUP_DIR.joinpath("redis")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
        services=["redis"],
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup redis",
        f"image, execute {colors.RED}rapydo pull redis",
    )
    exec_command(
        capfd,
        "restore redis",
        f"image, execute {colors.RED}rapydo pull redis",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "redis")

    key = faker.pystr()
    value1 = f"old-{faker.pystr()}"
    value2 = f"new-{faker.pystr()}"

    # NOTE: the old-/new- prefixes make it easy to tell which value survived
    # the backup/restore cycle
    get_key = f'shell redis "sh -c \'redis-cli --pass "$REDIS_PASSWORD" get {key}\'"'
    set_key1 = (
        f'shell redis "sh -c \'redis-cli --pass "$REDIS_PASSWORD" set {key} {value1}\'"'
    )
    set_key2 = (
        f'shell redis "sh -c \'redis-cli --pass "$REDIS_PASSWORD" set {key} {value2}\'"'
    )
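    # Quoting layers, outermost to innermost: shell redis "..." -> sh -c '...'
    # -> redis-cli; $REDIS_PASSWORD is expanded by the shell inside the
    # container, not by the test process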

    exec_command(
        capfd,
        set_key1,
    )

    exec_command(capfd, get_key, value1)

    # Backup command on a running Redis
    exec_command(
        capfd,
        "backup redis",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    # Backup command on a stopped Redis
    exec_command(
        capfd,
        "backup redis",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup redis --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup redis --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    exec_command(
        capfd,
        "backup redis --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup redis --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Create an additional backup to test the deletion (now backups are 3)
    exec_command(
        capfd,
        "backup redis",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup redis --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup redis --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2: all files above are ignored as they do not match the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    exec_command(capfd, "start backend redis")

    exec_command(
        capfd,
        set_key2,
    )

    exec_command(capfd, get_key, value2)

    # Restore command
    exec_command(
        capfd,
        "restore redis",
        "Please specify one of the following backup:",
        ".tar.gz",
    )

    exec_command(
        capfd,
        "restore redis invalid",
        "Invalid backup file, data/backup/redis/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore redis",
            "No backup found, the following folder "
            "does not exist: data/backup/redis",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore redis",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/redis")

        exec_command(
            capfd,
            "restore redis",
            "No backup found, data/backup/redis is empty",
        )

        open("data/backup/redis/test.gz", "a").close()

        exec_command(
            capfd,
            "restore redis",
            "No backup found, data/backup/redis is empty",
        )

        open("data/backup/redis/test.tar.gz", "a").close()

        exec_command(
            capfd,
            "restore redis",
            "Please specify one of the following backup:",
            "test.tar.gz",
        )

        os.remove("data/backup/redis/test.gz")
        os.remove("data/backup/redis/test.tar.gz")

    # Test restore on redis (requires redis to be down)
    files = os.listdir("data/backup/redis")
    files = [f for f in files if f.endswith(".tar.gz")]
    files.sort()
    redis_dump_file = files[-1]

    exec_command(capfd, "remove redis")
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore redis {redis_dump_file}",
        "Starting restore on redis...",
        f"Restore from data/backup/redis/{redis_dump_file} completed",
    )

    exec_command(capfd, "start", "Stack started")
    # 4) verify data matches point 1 again (restore completed)
    # postponed because redis needs time to start...

    exec_command(
        capfd,
        f"restore redis {redis_dump_file}",
        "Redis is running and the restore will temporary stop it.",
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        f"restore redis {redis_dump_file} --force --restart backend",
        "Starting restore on redis...",
        f"Restore from data/backup/redis/{redis_dump_file} completed",
    )

    # Wait for redis to completely start up
    service_verify(capfd, "redis")

    exec_command(capfd, get_key, value1)
Example #9
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "backup mariadb")
    execute_outside(capfd, "restore mariadb")

    backup_folder = BACKUP_DIR.joinpath("mariadb")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="mysql",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup mariadb",
        f"image, execute {colors.RED}rapydo pull mariadb",
    )
    exec_command(
        capfd,
        "restore mariadb",
        f"image, execute {colors.RED}rapydo pull mariadb",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "sqlalchemy")

    # This will initialize mariadb
    exec_command(capfd, "shell backend 'restapi init'")

    def exec_query(query: str) -> str:

        command = 'shell mariadb "'
        command += 'sh -c \'mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -D"$MYSQL_DATABASE" '
        command += f'-e \\"{query};\\"'
        # This is to close the sh -c 'command'
        command += "'"
        # This is to close the shell "command"
        command += '"'

        return command
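
    # e.g. exec_query("select 1") assembles to:
    #   shell mariadb "sh -c 'mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -D"$MYSQL_DATABASE" -e \"select 1;\"'"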

    # Verify the initialization
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tUser",
    )

    exec_command(
        capfd,
        "backup mariadb",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # A second backup is needed to test backup retention
    exec_command(
        capfd,
        "backup mariadb",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup mariadb --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup mariadb --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    exec_command(
        capfd,
        "backup mariadb --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup mariadb --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Create an additional backup to test the deletion (now backups are 3)
    exec_command(
        capfd,
        "backup mariadb",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )
    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup mariadb --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.tar").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.tar.gz").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup mariadb --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2: all files above are ignored as they do not match the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup mariadb",
        "The backup procedure requires mariadb running, please start your stack",
    )

    exec_command(
        capfd,
        "restore mariadb",
        "Please specify one of the following backup:",
        ".tar.gz",
    )
    exec_command(
        capfd,
        "restore mariadb invalid",
        "Invalid backup file, data/backup/mariadb/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore mariadb",
            "No backup found, the following folder "
            "does not exist: data/backup/mariadb",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore mariadb",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/mariadb")

        exec_command(
            capfd,
            "restore mariadb",
            "No backup found, data/backup/mariadb is empty",
        )

        open("data/backup/mariadb/test.tar.gz", "a").close()

        exec_command(
            capfd,
            "restore mariadb",
            "Please specify one of the following backup:",
            "test.tar.gz",
        )

        os.remove("data/backup/mariadb/test.tar.gz")

    files = os.listdir("data/backup/mariadb")
    files = [f for f in files if f.endswith(".tar.gz")]
    files.sort()
    mariadb_dump_file = files[-1]

    exec_command(capfd, "start", "Stack started")

    # MariaDB restore while the container is running requires the --force flag
    exec_command(
        capfd,
        f"restore mariadb {mariadb_dump_file}",
        "MariaDB is running and the restore will temporary stop it. "
        "If you want to continue add --force flag",
    )

    # Here we test the restore procedure:
    # 1) verify some data in the database
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tUser",
    )

    # 2) Modify the data
    exec_command(
        capfd,
        exec_query("update role SET description=name"),
    )
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tnormal_user",
    )

    # 3) restore the dump
    exec_command(
        capfd,
        f"restore mariadb {mariadb_dump_file} --force",
        "Starting restore on mariadb...",
        "Opening backup file",
        "Removing current datadir",
        "Restoring the backup",
        "...done",
        "completed OK!",
        "Removing the temporary uncompressed folder",
        f"Restore from data/backup/mariadb/{mariadb_dump_file} completed",
    )

    if Configuration.swarm_mode:
        time.sleep(5)

    # 4) verify data matches point 1 again (restore completed)
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tUser",
    )
Example #10
def test_all(capfd: Capture, faker: Faker) -> None:

    execute_outside(capfd, "backup neo4j")
    execute_outside(capfd, "restore neo4j")

    backup_folder = BACKUP_DIR.joinpath("neo4j")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="neo4j",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup neo4j",
        f"image, execute {colors.RED}rapydo pull neo4j",
    )
    exec_command(
        capfd,
        "restore neo4j",
        f"image, execute {colors.RED}rapydo pull neo4j",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "neo4j")

    # This will initialize neo4j
    exec_command(capfd, "shell backend 'restapi init'")

    time.sleep(25)
    # Just some extra delay: restapi init alone is not always enough...
    if Configuration.swarm_mode:
        time.sleep(30)

    # Verify the initialization
    cypher = "shell neo4j 'bin/cypher-shell"
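    # Assembled, a query renders as:
    #   shell neo4j 'bin/cypher-shell "match (r: Role) return r.name"'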
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "User"',
    )

    # Backup command
    exec_command(
        capfd,
        "backup neo4j",
        "Neo4j is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )
    exec_command(
        capfd,
        "backup neo4j --force --restart backend --restart rabbit",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )
    # This is to verify that --force restarted neo4j
    exec_command(
        capfd,
        "backup neo4j",
        "Neo4j is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup neo4j",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup neo4j --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup neo4j --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    exec_command(
        capfd,
        "backup neo4j --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup neo4j --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Create an additional backup to test the deletion (now backups are 3)
    exec_command(
        capfd,
        "backup neo4j",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup neo4j --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup neo4j --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2: all files above are ignored as they do not match the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    exec_command(capfd, "start", "Stack started")

    # Just some extra delay, neo4j is a slow starter
    time.sleep(25)

    # Restore command
    exec_command(capfd, "restore neo4j",
                 "Please specify one of the following backup:", ".dump")

    exec_command(
        capfd,
        "restore neo4j invalid",
        "Invalid backup file, data/backup/neo4j/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore neo4j",
            "No backup found, the following folder "
            "does not exist: data/backup/neo4j",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore neo4j",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/neo4j")

        exec_command(
            capfd,
            "restore neo4j",
            "No backup found, data/backup/neo4j is empty",
        )

        open("data/backup/neo4j/test.gz", "a").close()

        exec_command(
            capfd,
            "restore neo4j",
            "No backup found, data/backup/neo4j is empty",
        )

        open("data/backup/neo4j/test.dump", "a").close()

        exec_command(
            capfd,
            "restore neo4j",
            "Please specify one of the following backup:",
            "test.dump",
        )

        os.remove("data/backup/neo4j/test.gz")
        os.remove("data/backup/neo4j/test.dump")

    # Test restore on neo4j (requires neo4j to be down)
    files = os.listdir("data/backup/neo4j")
    files = [f for f in files if f.endswith(".dump")]
    files.sort()
    neo4j_dump_file = files[-1]

    time.sleep(25)

    # Here we test the restore procedure:
    # 1) verify some data in the database
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "User"',
    )

    # 2) Modify the data
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) SET r.description = r.name"\'',
    )
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "normal_user"',
    )
    exec_command(capfd, "remove")

    # 3) restore the dump
    exec_command(
        capfd,
        f"restore neo4j {neo4j_dump_file}",
        "Starting restore on neo4j...",
        "Done: ",
        f"Restore from data/backup/neo4j/{neo4j_dump_file} completed",
    )

    exec_command(capfd, "start", "Stack started")

    exec_command(
        capfd,
        f"restore neo4j {neo4j_dump_file}",
        "Neo4j is running and the restore will temporary stop it.",
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        f"restore neo4j {neo4j_dump_file} --force --restart backend",
        "Starting restore on neo4j...",
        "Done: ",
        f"Restore from data/backup/neo4j/{neo4j_dump_file} completed",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )

    # Wait for neo4j to completely start up
    service_verify(capfd, "neo4j")

    # 4) verify data matches point 1 again (restore completed)
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "User"',
    )