def test_base(capfd: Capture) -> None:
    """Verify that `rapydo update` succeeds on a clean repo and refuses to run
    when a submodule has untracked or unstaged changes."""
    execute_outside(capfd, "update")
    create_project(
        capfd=capfd,
        name="third",
        auth="postgres",
        frontend="angular",
    )
    init_project(capfd)

    # Skipping main because we are on a fake git repository
    exec_command(
        capfd,
        "update -i main",
        "All updated",
    )

    # Create an untracked file and an unstaged modification in the "do"
    # submodule to trigger the dirty-repository guard.
    # (Path.touch replaces the previous open(...).close() touch idiom)
    Path("submodules/do/temp.file").touch()
    with open("submodules/do/setup.py", "a") as f:
        f.write("# added from tests\n")

    exec_command(
        capfd,
        "update -i main",
        "Unable to update do repo, you have unstaged files",
        "Untracked files:",
        "submodules/do/temp.file",
        "Changes not staged for commit:",
        "submodules/do/setup.py",
        "Can't continue with updates",
    )
def test_dump(capfd: Capture, faker: Faker) -> None:
    """Verify that `rapydo dump` exports the compose configuration file."""
    execute_outside(capfd, "dump")
    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)

    exec_command(
        capfd,
        "dump",
        "Config dump: docker-compose.yml",
    )
def test_password(capfd: Capture, faker: Faker) -> None:
    """Verify that `rapydo password` requires either --random or --password."""
    execute_outside(capfd, "password")
    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "password backend",
        "Please specify one between --random and --password options",
    )
def test_join(capfd: Capture) -> None:
    """Verify the swarm `join` command for both worker and manager tokens."""
    # The join command is only meaningful in swarm mode
    if not Configuration.swarm_mode:
        return None

    execute_outside(capfd, "join")
    create_project(capfd=capfd, name="myname", auth="postgres", frontend="no")
    init_project(capfd)

    exec_command(
        capfd,
        "join",
        "To add a worker to this swarm, run the following command:",
        "docker swarm join --token ",
    )
    exec_command(
        capfd,
        "join --manager",
        "To add a manager to this swarm, run the following command:",
        "docker swarm join --token ",
    )
def test_debug_run(capfd: Capture, faker: Faker) -> None:
    """Verify `rapydo run [--debug] backend`: missing-image detection and the
    constraints on --command / --user options (only allowed with --debug)."""
    execute_outside(capfd, "run backend")
    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    # The old volatile command is kept only as a deprecation alias
    exec_command(
        capfd,
        "volatile backend",
        "Volatile command is replaced by rapydo run --debug backend",
    )

    img = f"rapydo/backend:{__version__}"
    exec_command(
        capfd,
        "run --debug backend",
        f"Missing {img} image, add {colors.RED}--pull{colors.RESET} option",
    )

    pull_images(capfd)
    # start_project(capfd)

    # exec_command(
    #     capfd,
    #     "run --debug backend --command hostname",
    #     "Bind for 0.0.0.0:8080 failed: port is already allocated",
    # )

    # exec_command(
    #     capfd,
    #     "remove",
    #     "Stack removed",
    # )

    # --command and --user are rejected unless debug mode is enabled
    exec_command(
        capfd,
        "run backend --command hostname",
        "Can't specify a command if debug mode is OFF",
    )

    exec_command(
        capfd,
        "run backend --command hostname --user developer",
        "Can't specify a user if debug mode is OFF",
    )

    exec_command(
        capfd,
        "run --debug backend --command hostname",
        "backend-server",
    )

    exec_command(
        capfd,
        "run --debug backend --command whoami",
        "root",
    )
    exec_command(
        capfd,
        "run --debug backend -u developer --command whoami",
        "Please remember that users in volatile containers are not mapped on current ",
        "developer",
    )
    exec_command(
        capfd,
        "run --debug backend -u invalid --command whoami",
        "Error response from daemon:",
        "unable to find user invalid:",
        "no matching entries in passwd file",
    )
def test_all(capfd: Capture, faker: Faker) -> None:
    """Verify SSL certificate management: volatile self-signed creation,
    refresh on running services, and chain/key file validation."""
    execute_outside(capfd, "ssl")
    project = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project,
        auth="neo4j",
        frontend="no",
        services=["rabbit"],
    )
    pconf = f"projects/{project}/project_configuration.yaml"

    exec_command(
        capfd,
        "--prod init -f",
        "Created default .projectrc file",
        "Project initialized",
    )

    start_registry(capfd)
    exec_command(
        capfd,
        "ssl",
        f"image, execute {colors.RED}rapydo pull proxy",
    )
    exec_command(
        capfd,
        "--prod pull --quiet",
        "Base images pulled from docker hub",
    )

    exec_command(
        capfd,
        "ssl",
        "The proxy is not running, start your stack or try with "
        f"{colors.RED}rapydo ssl --volatile",
    )
    # Before creating SSL certificates rabbit and neo4j should not be able to start
    exec_command(
        capfd,
        "run --debug rabbit",
        "SSL mandatory file not found: /ssl/real/fullchain1.pem",
    )
    exec_command(
        capfd,
        "run --debug neo4j",
        "SSL mandatory file not found: /ssl/real/fullchain1.pem",
    )

    exec_command(
        capfd,
        "ssl --volatile",
        "Creating a self signed SSL certificate",
        "Self signed SSL certificate successfully created",
        # Just to verify that the default does not change
        "Generating DH parameters, 1024 bit long safe prime, generator 2",
    )

    # Start to verify certificate creation while services are running
    exec_command(
        capfd,
        "--prod start",
    )

    # Needed because the next command requires rabbit already started
    # Otherwise will fail with:
    # Error: unable to perform an operation on node 'rabbit@rabbit'.
    # Please see diagnostics information and suggestions below.
    if Configuration.swarm_mode:
        # 60!? :| It still fails after raising to 30... Let's double it!!
        # 90!? :| It still fails after raising to 60!!
        time.sleep(90)
        # DEBUG CODE
        exec_command(capfd, "logs rabbit")
    else:
        time.sleep(5)

    service_verify(capfd, "rabbitmq")

    exec_command(
        capfd,
        "ssl --no-tty",
        "--no-tty option is deprecated, you can stop using it",
        "Creating a self signed SSL certificate",
        "Self signed SSL certificate successfully created",
        "Neo4j is running, a full restart is needed. NOT IMPLEMENTED YET.",
        "RabbitMQ is running, executing command to refresh the certificate",
        "New certificate successfully enabled",
    )

    # Validation of --chain-file / --key-file combinations
    exec_command(
        capfd,
        "ssl --chain-file /file",
        "Invalid chain file (you provided /file)",
    )
    exec_command(
        capfd,
        "ssl --key-file /file",
        "Invalid chain file (you provided none)",
    )
    exec_command(
        capfd,
        f"ssl --chain-file {pconf}",
        "Invalid key file (you provided none)",
    )
    exec_command(
        capfd,
        f"ssl --chain-file {pconf} --key-file /file",
        "Invalid key file (you provided /file)",
    )
    exec_command(
        capfd,
        f"ssl --chain-file {pconf} --key-file {pconf}",
        "Unable to automatically perform the requested operation",
        "You can execute the following commands by your-self:",
    )
def test_docker_registry(capfd: Capture, faker: Faker) -> None:
    """Verify the local docker registry lifecycle in swarm mode: startup,
    image push/list/removal and the registry HTTP API catalog behavior."""
    execute_outside(capfd, "run registry")
    if Configuration.swarm_mode:
        execute_outside(capfd, "images")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
        services=["rabbit"],
    )
    init_project(capfd)

    # In compose mode the registry is not supported at all
    if not Configuration.swarm_mode:
        exec_command(
            capfd,
            "run registry",
            "Can't start the registry in compose mode",
        )

        return None

    # All registry-dependent commands must fail while the registry is down
    exec_command(
        capfd,
        "pull backend",
        "Registry 127.0.0.1:5000 not reachable. "
        f"You can start it with {colors.RED}rapydo run registry",
    )

    exec_command(
        capfd,
        "build backend",
        "Registry 127.0.0.1:5000 not reachable. "
        f"You can start it with {colors.RED}rapydo run registry",
    )

    exec_command(
        capfd,
        "start backend",
        "Registry 127.0.0.1:5000 not reachable. "
        f"You can start it with {colors.RED}rapydo run registry",
    )

    exec_command(
        capfd,
        "images",
        "Registry 127.0.0.1:5000 not reachable. "
        f"You can start it with {colors.RED}rapydo run registry",
    )

    exec_command(
        capfd,
        "registry",
        "Registry command is replaced by rapydo run registry",
    )

    img = f"rapydo/registry:{__version__}"
    exec_command(
        capfd,
        "run registry",
        f"Missing {img} image, add {colors.RED}--pull{colors.RESET} option",
    )

    exec_command(
        capfd,
        "run registry --pull",
        "Running registry...",
    )

    time.sleep(2)

    exec_command(
        capfd,
        "images",
        "This registry contains no images",
    )

    exec_command(
        capfd,
        "pull backend",
        "Base images pulled from docker hub and pushed into the local registry",
    )

    exec_command(
        capfd,
        "images",
        "This registry contains 1 image(s):",
        "rapydo/backend",
    )

    exec_command(
        capfd,
        "pull rabbit",
        "Base images pulled from docker hub and pushed into the local registry",
    )

    exec_command(
        capfd,
        "images",
        "This registry contains 2 image(s):",
        "rapydo/backend",
        "rapydo/rabbitmq",
    )

    exec_command(
        capfd,
        "run registry",
        "The registry is already running at 127.0.0.1:5000",
    )

    # Changing the port forces the existing container to be replaced
    exec_command(
        capfd,
        "-e REGISTRY_PORT=5001 run registry",
        "The registry container is already existing, removing",
    )

    exec_command(
        capfd,
        "images --remove invalid",
        "Some of the images that you specified are not found in this registry",
    )

    # Copied from images.py
    docker = Docker()
    registry = docker.registry.get_host()
    host = f"https://{registry}"

    r = docker.registry.send_request(f"{host}/v2/_catalog")

    catalog = r.json()

    assert "repositories" in catalog
    assert "rapydo/backend" in catalog["repositories"]

    r = docker.registry.send_request(f"{host}/v2/rapydo/backend/tags/list")

    tags_list = r.json()

    assert "name" in tags_list
    assert tags_list["name"] == "rapydo/backend"
    assert "tags" in tags_list
    assert __version__ in tags_list["tags"]

    exec_command(
        capfd,
        f"images --remove rapydo/backend:{__version__}",
        f"Image rapydo/backend:{__version__} deleted from ",
        "Executing registry garbage collector...",
        "Registry garbage collector successfully executed",
        "Registry restarted to clean the layers cache",
    )

    time.sleep(1)

    r = docker.registry.send_request(f"{host}/v2/_catalog")

    catalog = r.json()

    assert "repositories" in catalog
    # After the delete the repository is still in the catalog but with no tag associated
    assert "rapydo/backend" in catalog["repositories"]

    r = docker.registry.send_request(f"{host}/v2/rapydo/backend/tags/list")

    tags_list = r.json()

    assert "name" in tags_list
    assert tags_list["name"] == "rapydo/backend"
    assert "tags" in tags_list
    # No tags associated to this repository
    assert tags_list["tags"] is None

    exec_command(
        capfd,
        f"images --remove rapydo/backend:{__version__}",
        "Some of the images that you specified are not found in this registry",
    )

    exec_command(
        capfd,
        "images",
        "This registry contains 1 image(s):",
        "rapydo/rabbitmq",
    )

    exec_command(
        capfd,
        f"images --remove rapydo/backend:{__version__}",
        "Some of the images that you specified are not found in this registry",
    )

    exec_command(
        capfd,
        f"images --remove rapydo/rabbitmq:{__version__}",
        f"Image rapydo/rabbitmq:{__version__} deleted from ",
        "Executing registry garbage collector...",
        "Registry garbage collector successfully executed",
        "Registry restarted to clean the layers cache",
    )

    exec_command(
        capfd,
        f"images --remove rapydo/rabbitmq:{__version__}",
        "This registry contains no images",
    )
def test_interfaces(capfd: Capture, faker: Faker) -> None:
    """Verify the adminer and swaggerui interfaces via `rapydo run`, with
    default and custom ports, in both development and production mode."""
    execute_outside(capfd, "run adminer")
    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    # Deprecated aliases
    exec_command(
        capfd,
        "interfaces sqlalchemy",
        "Deprecated interface sqlalchemy, use adminer instead",
    )

    exec_command(
        capfd,
        "interfaces adminer",
        "Interfaces command is replaced by rapydo run adminer",
    )

    exec_command(
        capfd,
        "run invalid",
        "Services misconfiguration, can't find invalid",
    )

    exec_command(
        capfd,
        "run adminer --port XYZ",
        "Invalid value for '--port' / '-p': 'XYZ' is not a valid integer",
    )

    img = f"rapydo/adminer:{__version__}"
    exec_command(
        capfd,
        "run adminer",
        f"Missing {img} image, add {colors.RED}--pull{colors.RESET} option",
    )

    # Launch Adminer UI with default port
    exec_command(
        capfd,
        "run adminer --pull --detach",
        "Pulling image for adminer...",
        # f"Creating {project_name}_adminer_run",
        "You can access Adminer interface on: http://localhost:7777",
    )

    # Admin or SwaggerUI does not start? You can debug with:
    # from python_on_whales import docker
    # assert docker.logs("adminer", tail=10) == "debug"

    exec_command(
        capfd,
        "remove adminer",
        "Service adminer removed",
    )

    # Launch Adminer UI with custom port
    exec_command(
        capfd,
        "run adminer --port 3333 --detach",
        # "Pulling adminer",
        # f"Creating {project_name}_adminer_run",
        "You can access Adminer interface on: http://localhost:3333",
    )

    # Launch Swagger UI with default port
    exec_command(
        capfd,
        "run swaggerui --pull --detach",
        "Pulling image for swaggerui...",
        "You can access SwaggerUI web page here: http://localhost:7777",
    )

    exec_command(
        capfd,
        "remove swaggerui",
        "Service swaggerui removed",
    )

    # Launch Swagger UI with custom port
    exec_command(
        capfd,
        "run swaggerui --port 4444 --detach",
        "You can access SwaggerUI web page here: http://localhost:4444",
    )

    # This fails if the interfaces are non running, i.e. in case of a post-start crash
    # Introduced after a BUG due to the tty setting in volatile container
    # that made run interfaces fail on GA
    exec_command(
        capfd,
        "remove adminer swaggerui",
        "Service adminer removed",
        "Service swaggerui removed",
    )

    # Test Swagger UI and Admin in production mode (https URLs expected)
    exec_command(
        capfd,
        "--prod init -f",
        "Created default .projectrc file",
        "Project initialized",
    )

    exec_command(
        capfd,
        "--prod run swaggerui --port 5555 --detach",
        "You can access SwaggerUI web page here: https://localhost:5555",
    )

    exec_command(
        capfd,
        "--prod run adminer --port 6666 --detach",
        "You can access Adminer interface on: https://localhost:6666",
    )
def test_all(capfd: Capture, faker: Faker) -> None:
    """Verify backup and restore for neo4j: --force behavior on a running
    service, retention rules (--max / --dry-run) and the full restore cycle."""
    execute_outside(capfd, "backup neo4j")
    execute_outside(capfd, "restore neo4j")

    backup_folder = BACKUP_DIR.joinpath("neo4j")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="neo4j",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup neo4j",
        f"image, execute {colors.RED}rapydo pull neo4j",
    )
    exec_command(
        capfd,
        "restore neo4j",
        f"image, execute {colors.RED}rapydo pull neo4j",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "neo4j")

    # This will initialize neo4j
    exec_command(capfd, "shell backend 'restapi init'")

    time.sleep(25)
    # Just some extra delay. restapi init alone is not always enough...
    if Configuration.swarm_mode:
        time.sleep(30)

    # Verify the initialization
    # NOTE: the opening quote is deliberately left unbalanced here and closed
    # with \' in the f-strings below
    cypher = "shell neo4j 'bin/cypher-shell"
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "User"',
    )

    # Backup command
    exec_command(
        capfd,
        "backup neo4j",
        "Neo4j is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )
    exec_command(
        capfd,
        "backup neo4j --force --restart backend --restart rabbit",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )
    # This is to verify that --force restarted neo4j
    exec_command(
        capfd,
        "backup neo4j",
        "Neo4j is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup neo4j",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup neo4j --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup neo4j --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    exec_command(
        capfd,
        "backup neo4j --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup neo4j --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Create an additional backup to the test deletion (now backups are 3)
    exec_command(
        capfd,
        "backup neo4j",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup neo4j --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup neo4j --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2, all files above are ignored because not matching the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    exec_command(capfd, "start", "Stack started")

    # Just some extra delay, neo4j is a slow starter
    time.sleep(25)

    # Restore command
    exec_command(
        capfd, "restore neo4j", "Please specify one of the following backup:", ".dump"
    )
    exec_command(
        capfd,
        "restore neo4j invalid",
        "Invalid backup file, data/backup/neo4j/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore neo4j",
            "No backup found, the following folder "
            "does not exist: data/backup/neo4j",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore neo4j",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/neo4j")

        exec_command(
            capfd,
            "restore neo4j",
            "No backup found, data/backup/neo4j is empty",
        )

        # A .gz file is not a valid neo4j backup and must be ignored
        open("data/backup/neo4j/test.gz", "a").close()

        exec_command(
            capfd,
            "restore neo4j",
            "No backup found, data/backup/neo4j is empty",
        )

        open("data/backup/neo4j/test.dump", "a").close()

        exec_command(
            capfd,
            "restore neo4j",
            "Please specify one of the following backup:",
            "test.dump",
        )

        os.remove("data/backup/neo4j/test.gz")
        os.remove("data/backup/neo4j/test.dump")

    # Test restore on neo4j (required neo4j to be down)
    files = os.listdir("data/backup/neo4j")
    files = [f for f in files if f.endswith(".dump")]
    files.sort()
    neo4j_dump_file = files[-1]

    time.sleep(25)

    # Here we test the restore procedure:
    # 1) verify some data in the database
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "User"',
    )
    # 2) Modify the data
    exec_command(capfd, f'{cypher} "match (r: Role) SET r.description = r.name"\'')
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "normal_user"',
    )
    exec_command(capfd, "remove")
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore neo4j {neo4j_dump_file}",
        "Starting restore on neo4j...",
        "Done: ",
        f"Restore from data/backup/neo4j/{neo4j_dump_file} completed",
    )

    exec_command(capfd, "start", "Stack started")

    exec_command(
        capfd,
        f"restore neo4j {neo4j_dump_file}",
        "Neo4j is running and the restore will temporary stop it.",
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        f"restore neo4j {neo4j_dump_file} --force --restart backend",
        "Starting restore on neo4j...",
        "Done: ",
        f"Restore from data/backup/neo4j/{neo4j_dump_file} completed",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )

    # Wait neo4j to completely startup
    service_verify(capfd, "neo4j")

    # 4) verify data match again point 1 (restore completed)
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "User"',
    )
def test_all(capfd: Capture, faker: Faker) -> None:
    """Verify backup and restore for postgres: retention rules (--max /
    --dry-run), running-container requirements and the full restore cycle."""
    execute_outside(capfd, "backup postgres")
    execute_outside(capfd, "restore postgres")

    backup_folder = BACKUP_DIR.joinpath("postgres")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup postgres",
        f"image, execute {colors.RED}rapydo pull postgres",
    )
    exec_command(
        capfd,
        "restore postgres",
        f"image, execute {colors.RED}rapydo pull postgres",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "sqlalchemy")

    # This will initialize postgres
    exec_command(capfd, "shell backend 'restapi init'")

    # Verify the initialization
    # NOTE: the opening quote is deliberately left unbalanced here and closed
    # with \' in the f-strings below
    psql = "shell postgres 'psql -U sqluser -d SQL_API -c"
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user | User",
    )

    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # A second backup is needed to test backup retention
    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    exec_command(
        capfd,
        "backup postgres --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup postgres --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Create an additional backup to the test deletion (now backups are 3)
    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))
    # Verify the deletion
    exec_command(
        capfd,
        "backup postgres --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.gz").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2, all files above are ignored because not matching the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    # Postgres backup requires the container to be running
    exec_command(
        capfd,
        "backup postgres",
        "The backup procedure requires postgres running, please start your stack",
    )

    exec_command(
        capfd,
        "restore postgres",
        "Please specify one of the following backup:",
        ".sql.gz",
    )
    exec_command(
        capfd,
        "restore postgres invalid",
        "Invalid backup file, data/backup/postgres/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore postgres",
            "No backup found, the following folder "
            "does not exist: data/backup/postgres",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore postgres",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/postgres")

        exec_command(
            capfd,
            "restore postgres",
            "No backup found, data/backup/postgres is empty",
        )

        open("data/backup/postgres/test.sql.gz", "a").close()

        exec_command(
            capfd,
            "restore postgres",
            "Please specify one of the following backup:",
            "test.sql.gz",
        )

        os.remove("data/backup/postgres/test.sql.gz")

    files = os.listdir("data/backup/postgres")
    files = [f for f in files if f.endswith(".sql.gz")]
    files.sort()
    postgres_dump_file = files[-1]

    # Postgres restore not allowed if container is not running
    exec_command(
        capfd,
        f"restore postgres {postgres_dump_file}",
        "The restore procedure requires postgres running, please start your stack",
    )

    exec_command(capfd, "start", "Stack started")

    # Here we test the restore procedure:
    # 1) verify some data in the database
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user | User",
    )
    # 2) Modify the data
    exec_command(
        capfd,
        f'{psql} "update role SET description=name"\'',
    )
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user | normal_user",
    )
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore postgres {postgres_dump_file}",
        "Starting restore on postgres...",
        "CREATE DATABASE",
        "ALTER DATABASE",
        f"Restore from data/backup/postgres/{postgres_dump_file} completed",
    )

    # 4) verify data match again point 1 (restore completed)
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user | User",
    )
def test_all(capfd: Capture, faker: Faker) -> None:
    """Verify backup and restore for rabbit: queue state round-trip, --force
    behavior, retention rules (--max / --dry-run) and the full restore cycle."""
    execute_outside(capfd, "backup rabbit")
    execute_outside(capfd, "restore rabbit")

    backup_folder = BACKUP_DIR.joinpath("rabbit")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
        services=["rabbit"],
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup rabbit",
        f"image, execute {colors.RED}rapydo pull rabbit",
    )
    exec_command(
        capfd,
        "restore rabbit",
        f"image, execute {colors.RED}rapydo pull rabbit",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "rabbitmq")

    # Just some extra delay, rabbit is a slow starter
    time.sleep(5)

    # NOTE: q = rabbitmq.__name__ is just to have a fixed name to be used to test the
    # queue without the need to introduce further nested " or '
    query_queue = "shell backend \"/usr/bin/python3 -c 'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; r = rabbitmq.get_instance();print(q, r.queue_exists(q));'\""
    create_queue = "shell backend \"/usr/bin/python3 -c 'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; r = rabbitmq.get_instance(); r.create_queue(q);'\""
    delete_queue = "shell backend \"/usr/bin/python3 -c 'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; r = rabbitmq.get_instance(); r.delete_queue(q);'\""

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq False")

    exec_command(
        capfd,
        create_queue,
    )

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq True")

    # Backup command
    exec_command(
        capfd,
        "backup rabbit",
        "RabbitMQ is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )
    exec_command(
        capfd,
        "backup rabbit --force --restart backend",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )
    # This is to verify that --force restarted rabbit
    exec_command(
        capfd,
        "backup rabbit",
        "RabbitMQ is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup rabbit",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    exec_command(
        capfd,
        "backup rabbit --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup rabbit --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Create an additional backup to the test deletion (now backups are 3)
    exec_command(
        capfd,
        "backup rabbit",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup rabbit --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2, all files above are ignored because not matching the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    exec_command(capfd, "start backend rabbit")

    # Just some extra delay, rabbit is a slow starter
    if Configuration.swarm_mode:
        time.sleep(20)
    else:
        time.sleep(10)

    exec_command(
        capfd,
        delete_queue,
    )

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq False")

    # Restore command
    exec_command(
        capfd,
        "restore rabbit",
        "Please specify one of the following backup:",
        ".tar.gz",
    )
    exec_command(
        capfd,
        "restore rabbit invalid",
        "Invalid backup file, data/backup/rabbit/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore rabbit",
            "No backup found, the following folder "
            "does not exist: data/backup/rabbit",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore rabbit",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/rabbit")

        exec_command(
            capfd,
            "restore rabbit",
            "No backup found, data/backup/rabbit is empty",
        )

        # A plain .gz file is not a valid rabbit backup and must be ignored
        open("data/backup/rabbit/test.gz", "a").close()

        exec_command(
            capfd,
            "restore rabbit",
            "No backup found, data/backup/rabbit is empty",
        )

        open("data/backup/rabbit/test.tar.gz", "a").close()

        exec_command(
            capfd,
            "restore rabbit",
            "Please specify one of the following backup:",
            "test.tar.gz",
        )

        os.remove("data/backup/rabbit/test.gz")
        os.remove("data/backup/rabbit/test.tar.gz")

    # Test restore on rabbit (required rabbit to be down)
    files = os.listdir("data/backup/rabbit")
    files = [f for f in files if f.endswith(".tar.gz")]
    files.sort()
    rabbit_dump_file = files[-1]

    exec_command(capfd, "remove")
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file}",
        "Starting restore on rabbit...",
        f"Restore from data/backup/rabbit/{rabbit_dump_file} completed",
    )

    exec_command(capfd, "start", "Stack started")

    # 4) verify data match again point 1 (restore completed)
    # postponed because rabbit needs time to start...

    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file}",
        "RabbitMQ is running and the restore will temporary stop it.",
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file} --force --restart backend",
        "Starting restore on rabbit...",
        f"Restore from data/backup/rabbit/{rabbit_dump_file} completed",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )

    # Wait rabbit to completely startup
    service_verify(capfd, "rabbitmq")

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq True")
def test_base(capfd: Capture, faker: Faker) -> None:
    """Verify base CLI behaviors: version reporting, invalid options,
    folder-position checks, remote-host errors, service selection on start,
    environment validation and custom project commands."""
    execute_outside(capfd, "version")
    exec_command(
        capfd,
        "--version",
        f"rapydo version: {__version__}",
    )

    project = random_project_name(faker)
    exec_command(
        capfd,
        f"--invalid-option create {project}",
        "Error: No such option: --invalid-option",
    )

    exec_command(capfd, "rapydo", "Usage")

    create_project(
        capfd=capfd,
        name=project,
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)

    exec_command(
        capfd,
        "version",
        f"rapydo: {colors.GREEN}{__version__}",
        f"required rapydo: {colors.GREEN}{__version__}",
    )

    folder = os.getcwd()
    # Tests from a subfolder
    os.chdir("projects")
    exec_command(
        capfd,
        "check -i main --no-git --no-builds",
        "You are not in the main folder, please change your working dir",
        "Found a valid parent folder:",
        "Suggested command: cd ..",
    )

    os.chdir(project)
    exec_command(
        capfd,
        "check -i main --no-git --no-builds",
        "You are not in the main folder, please change your working dir",
        "Found a valid parent folder:",
        "Suggested command: cd ../..",
    )

    # Tests from outside the folder
    os.chdir(tempfile.gettempdir())
    exec_command(
        capfd,
        "check -i main",
        "You are not in a git repository",
        "Please note that this command only works from inside a rapydo-like repository",
        "Verify that you are in the right folder, now you are in:",
    )

    os.chdir(folder)

    exec_command(
        capfd,
        "--remote invalid check -i main --no-git",
        "Could not resolve hostname invalid: ",
    )

    exec_command(
        capfd,
        "--remote invalid@invalid check -i main --no-git",
        # Temporary failure in name resolution depends by the OS
        # on alpine the message is: Name does not resolve
        # "Could not resolve hostname invalid: Temporary failure in name resolution",
        "Could not resolve hostname invalid: ",
    )

    exec_command(
        capfd,
        "-s backend check -i main --no-git --no-builds",
        # warnings are not caught !?
        # "-s is replaced by rapydo <command> service",
    )

    exec_command(
        capfd,
        "start backend",
        "Enabled services: backend",
    )

    exec_command(
        capfd,
        "start backend postgres",
        "Enabled services: backend, postgres",
    )

    # A leading underscore excludes the service from the selection
    exec_command(
        capfd,
        "start backend postgres _backend",
        "Enabled services: postgres",
    )

    exec_command(
        capfd,
        "start backend postgres _invalid",
        "No such service: invalid",
    )

    # NOTE: "enviroment" is the product's own (misspelled) message, kept verbatim
    exec_command(
        capfd,
        "-e ACTIVATE_FAIL2BAN start fail2ban",
        "Invalid enviroment, missing value in ACTIVATE_FAIL2BAN",
    )

    # Install a custom command in the project and verify it is picked up
    Path(PROJECT_DIR, project, "commands").mkdir(exist_ok=True)
    with open(f"projects/{project}/commands/custom.py", "w+") as f:
        f.write(
            """
from controller.app import Application
from controller import log


@Application.app.command(help="This is a custom command")
def custom() -> None:
    Application.print_command()
    log.info("Hello from custom command!")
"""
        )

    exec_command(
        capfd,
        "custom",
        "Hello from custom command!",
    )
def test_all(capfd: Capture) -> None:
    """Verify the status command in both swarm and compose mode, before and
    after the stack is started, with and without service filters."""
    execute_outside(capfd, "status")

    create_project(
        capfd=capfd,
        name="first",
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    pull_images(capfd)

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "status",
            "Manager",
            "Ready+Active",
            "No service is running",
        )
    else:
        exec_command(
            capfd,
            "status",
            "No container is running",
        )

    start_project(capfd)

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "status",
            "Manager",
            "Ready+Active",
            "first_backend",
            "first_postgres",
            " [1]",
            # No longer found starting because
            # HEALTHCHECK_INTERVAL is defaulted to 1s during tests
            # "starting",
            "running",
        )

        init_project(capfd, "", "--force")

        exec_command(
            capfd,
            "start --force",
            "Stack started",
        )

        time.sleep(4)

        exec_command(
            capfd,
            "status",
            "running",
        )

        exec_command(
            capfd,
            "status backend",
            "running",
        )

        exec_command(
            capfd,
            "status backend postgres",
            "running",
        )
    else:
        exec_command(
            capfd,
            "status",
            "first-backend-1",
        )

        exec_command(
            capfd,
            "status backend",
            "first-backend-1",
        )

        exec_command(
            capfd,
            "status backend postgres",
            "first-backend-1",
        )
def test_all(capfd: Capture, faker: Faker) -> None:
    """Verify the list command: missing/invalid arguments, env variables
    (including custom --env values), submodules and active services before
    and after the stack is started."""
    execute_outside(capfd, "list env")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="postgres",
        frontend="no",
        services=["redis"],
        extra="--env CUSTOMVAR1=mycustomvalue --env CUSTOMVAR2=mycustomvalue",
    )
    init_project(capfd)

    # Some tests with list
    exec_command(
        capfd,
        "list",
        "Missing argument 'ELEMENT_TYPE:{env|services|submodules}'. Choose from:",
    )

    exec_command(
        capfd,
        "list invalid",
        "Invalid value for",
        "'invalid' is not one of 'env', 'services', 'submodules'",
    )

    exec_command(
        capfd,
        "list env",
        "List env variables:",
        "ACTIVATE_ALCHEMY",
        "CUSTOMVAR1",
        "CUSTOMVAR2",
        "mycustomvalue",
    )

    exec_command(
        capfd,
        "list submodules",
        "List of submodules:",
    )

    # Before the start the services are not running: status column is N/A
    exec_command(
        capfd,
        "list services",
        "List of active services:",
        "backend",
        "postgres",
        "redis",
        "N/A",
    )

    start_registry(capfd)
    pull_images(capfd)
    start_project(capfd)

    exec_command(
        capfd,
        "list services",
        "List of active services:",
        "backend",
        "postgres",
        "redis",
        "running",
    )
def test_all(capfd: Capture, faker: Faker) -> None:
    """Verify the shell command: invalid services, exit codes 126/127,
    default commands/users, behavior on a stopped stack, and the
    --replica / --broadcast options on scaled services."""
    execute_outside(capfd, "shell backend ls")

    create_project(
        capfd=capfd,
        name="first",
        auth="no",
        frontend="angular",
        services=["redis", "fail2ban"],
    )
    init_project(capfd)
    start_registry(capfd)
    pull_images(capfd)
    start_project(capfd)

    exec_command(
        capfd, "shell invalid", "No running container found for invalid service"
    )

    exec_command(
        capfd,
        "shell --no-tty backend invalid",
        "--no-tty option is deprecated, you can stop using it",
    )

    # 126: command found but cannot be invoked
    exec_command(
        capfd,
        "shell backend invalid",
        "The command execution was terminated by command cannot be invoked. "
        "Exit code is 126",
    )

    # 127: command not found
    exec_command(
        capfd,
        'shell backend "bash invalid"',
        "The command execution was terminated by command not found. "
        "Exit code is 127",
    )

    exec_command(
        capfd,
        "shell backend hostname",
        "backend-server",
    )

    # The default command blocks, so interrupt it with an alarm after 2 seconds
    signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(2)
    exec_command(
        capfd,
        "shell backend --default-command",
        "Time is up",
    )

    # This can't work on GitHub Actions due to the lack of tty
    # signal.signal(signal.SIGALRM, handler)
    # signal.alarm(2)
    # exec_command(
    #     capfd,
    #     "shell backend",
    #     # "developer@backend-server:[/code]",
    #     "Time is up",
    # )

    # Testing default users. I didn't include all the containers because:
    # 1. this will greatly slow down this test for a very small benefit
    # 2. check the presence of 'postgres' in the output of shell postgres whoami
    #    is trivial because it is always in the output, due to the echo of the command
    exec_command(
        capfd,
        "shell backend whoami",
        "developer",
    )
    exec_command(
        capfd,
        "shell frontend whoami",
        "node",
    )
    # Added because fail2ban is deployed in global mode, so that the container name is
    # different and this can make the command to fail
    # (as happened before the introduction of this test)
    exec_command(
        capfd,
        "shell fail2ban whoami",
        "root",
    )

    exec_command(
        capfd,
        "remove",
        "Stack removed",
    )

    exec_command(
        capfd,
        "shell backend hostname",
        "Requested command: hostname with user: developer",
        "No running container found for backend service",
    )

    exec_command(
        capfd,
        "shell backend --default",
        "Requested command: restapi launch with user: developer",
        "No running container found for backend service",
    )

    exec_command(
        capfd,
        "shell backend --replica 1 --default",
        "Requested command: restapi launch with user: developer",
        "No running container found for backend service",
    )

    exec_command(
        capfd,
        "shell backend --replica 2 --default",
        "Requested command: restapi launch with user: developer",
        "Replica number 2 not found for backend service",
    )

    # Scale a service to 2 replicas to test --replica and --broadcast.
    # In swarm mode backend supports scaling; in compose mode redis is used instead
    if Configuration.swarm_mode:
        service = "backend"
        exec_command(
            capfd,
            "start backend",
            "Stack started",
        )
        exec_command(
            capfd,
            "scale backend=2 --wait",
            "first_backend scaled to 2",
            "Service converged",
        )
    else:
        service = "redis"
        exec_command(
            capfd,
            "scale redis=2",
            "Scaling services: redis=2...",
            "Services scaled: redis=2",
        )

    docker = Docker()
    container1 = docker.get_container(service, slot=1)
    container2 = docker.get_container(service, slot=2)
    assert container1 is not None
    assert container2 is not None
    assert container1 != container2

    # Mark each replica with a unique file to tell them apart
    string1 = faker.pystr(min_chars=30, max_chars=30)
    string2 = faker.pystr(min_chars=30, max_chars=30)

    docker.client.container.execute(
        container1[0],
        command=["touch", f"/tmp/{string1}"],
        tty=False,
        detach=False,
    )

    docker.client.container.execute(
        container2[0],
        command=["touch", f"/tmp/{string2}"],
        tty=False,
        detach=False,
    )

    exec_command(capfd, f"shell {service} --replica 1 'ls /tmp/'", string1)

    exec_command(capfd, f"shell {service} --replica 2 'ls /tmp/'", string2)

    exec_command(
        capfd,
        f"shell {service} mycommand --replica 2 --broadcast",
        "--replica and --broadcast options are not compatible",
    )

    # Broadcast executes on all replicas, so both marker files are reported
    exec_command(
        capfd,
        f"shell {service} --broadcast 'ls /tmp/'",
        string1,
        string2,
    )

    exec_command(
        capfd,
        "remove",
        "Stack removed",
    )

    exec_command(
        capfd,
        f"shell {service} mycommand --broadcast",
        f"No running container found for {service} service",
    )
def test_all(capfd: Capture) -> None:
    """Verify the start command: sub-stack deployment semantics (replace in
    swarm mode vs aggregate in compose mode) and, in swarm mode, automatic
    creation of missing bind folders including the permission-denied case."""
    execute_outside(capfd, "start")
    if not Configuration.swarm_mode:
        execute_outside(capfd, "stop")

    project_name = "first"
    create_project(
        capfd=capfd,
        name=project_name,
        auth="neo4j",
        frontend="angular",
    )
    init_project(capfd)

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "start",
            "Registry 127.0.0.1:5000 not reachable.",
        )

    start_registry(capfd)

    exec_command(
        capfd,
        "start backend invalid",
        "No such service: invalid",
    )

    exec_command(
        capfd,
        "start backend",
        f"image, execute {colors.RED}rapydo pull backend",
    )

    pull_images(capfd)

    docker = Docker()

    if Configuration.swarm_mode:
        # Deploy a sub-stack
        exec_command(
            capfd,
            "start backend",
            "Enabled services: backend",
            "Stack started",
        )
        # Only backend is expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is None

        # Once started a stack in swarm mode, it's not possible
        # to re-deploy another stack
        # exec_command(
        #     capfd,
        #     "start",
        #     "A stack is already running",
        #     f"Stop it with {colors.RED}rapydo remove{colors.RESET} "
        #     "if you want to start a new stack",
        # )

        # Deploy an additional sub-stack
        exec_command(
            capfd,
            "start neo4j",
            "Enabled services: neo4j",
            "Stack started",
        )
        # In swarm mode new stack replaces the previous
        # => Only neo4j is expected to be running
        assert docker.get_container("backend") is None
        assert docker.get_container("neo4j") is not None

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        # Deploy the full stack
        exec_command(
            capfd,
            "start",
            "Stack started",
        )
        # Now both backend and neo4j are expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is not None

        # ############################
        # Verify bind volumes checks #
        # ############################

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        data_folder = DATA_DIR.joinpath(project_name)
        karma_folder = data_folder.joinpath("karma")

        # Delete data/project_name/karma and it will be recreated
        assert karma_folder.exists()
        shutil.rmtree(karma_folder)
        assert not karma_folder.exists()

        # set the data folder read only
        data_folder.chmod(0o550)

        # The missing folder can't be recreated due to permissions denied
        exec_command(
            capfd,
            "start frontend",
            "A bind folder is missing and can't be automatically created: ",
            f"/data/{project_name}/karma",
        )
        assert not karma_folder.exists()

        # Restore RW permissions
        data_folder.chmod(0o770)

        exec_command(
            capfd,
            "start frontend",
            "A bind folder was missing and was automatically created: ",
            f"/data/{project_name}/karma",
            "Stack started",
        )
        assert karma_folder.exists()
    else:
        # Deploy a sub-stack
        exec_command(
            capfd,
            "start backend",
            "Enabled services: backend",
            "Stack started",
        )
        # Only backend is expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is None

        # Deploy an additional sub-stack
        exec_command(
            capfd,
            "start neo4j",
            "Enabled services: neo4j",
            "Stack started",
        )
        # In compose mode additional stack are aggregated
        # => both backend and neo4j are expected to be running
        assert docker.get_container("backend") is not None
        assert docker.get_container("neo4j") is not None

        # exec_command(
        #     capfd,
        #     "start",
        #     "A stack is already running.",
        # )

        exec_command(
            capfd,
            "start",
            "Stack started",
        )
def test_all(capfd: Capture, faker: Faker) -> None:
    """Verify redis backup and restore: backup on running/stopped service,
    retention (--max, --dry-run, pattern filtering), restore error paths and
    a full round-trip checking that restored data matches the backup."""
    execute_outside(capfd, "backup redis")
    execute_outside(capfd, "restore redis")

    backup_folder = BACKUP_DIR.joinpath("redis")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
        services=["redis"],
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup redis",
        f"image, execute {colors.RED}rapydo pull redis",
    )
    exec_command(
        capfd,
        "restore redis",
        f"image, execute {colors.RED}rapydo pull redis",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "redis")

    # value1 is written before the backup, value2 after it; the final restore
    # must bring value1 back
    key = faker.pystr()
    value1 = f"old-{faker.pystr()}"
    value2 = f"new-{faker.pystr()}"

    # NOTE(review): comment inherited from the queue-based tests — here the fixed
    # names are simply used to avoid introducing further nested " or '
    get_key = f'shell redis "sh -c \'redis-cli --pass "$REDIS_PASSWORD" get {key}\'"'
    set_key1 = (
        f'shell redis "sh -c \'redis-cli --pass "$REDIS_PASSWORD" set {key} {value1}\'"'
    )
    set_key2 = (
        f'shell redis "sh -c \'redis-cli --pass "$REDIS_PASSWORD" set {key} {value2}\'"'
    )

    exec_command(
        capfd,
        set_key1,
    )

    exec_command(capfd, get_key, value1)

    # Backup command on a running Redis
    exec_command(
        capfd,
        "backup redis",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    # Backup command on a stopped Redis
    exec_command(
        capfd,
        "backup redis",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup redis --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup redis --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    exec_command(
        capfd,
        "backup redis --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup redis --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Create an additional backup to the test deletion (now backups are 3)
    exec_command(
        capfd,
        "backup redis",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup redis --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup redis --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2, all files above are ignored because not matching the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    exec_command(capfd, "start backend redis")

    exec_command(
        capfd,
        set_key2,
    )

    exec_command(capfd, get_key, value2)

    # Restore command
    exec_command(
        capfd,
        "restore redis",
        "Please specify one of the following backup:",
        ".tar.gz",
    )
    exec_command(
        capfd,
        "restore redis invalid",
        "Invalid backup file, data/backup/redis/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore redis",
            "No backup found, the following folder "
            "does not exist: data/backup/redis",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore redis",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/redis")
        exec_command(
            capfd,
            "restore redis",
            "No backup found, data/backup/redis is empty",
        )

        # .gz alone is not a valid backup extension
        open("data/backup/redis/test.gz", "a").close()
        exec_command(
            capfd,
            "restore redis",
            "No backup found, data/backup/redis is empty",
        )

        open("data/backup/redis/test.tar.gz", "a").close()
        exec_command(
            capfd,
            "restore redis",
            "Please specify one of the following backup:",
            "test.tar.gz",
        )

        os.remove("data/backup/redis/test.gz")
        os.remove("data/backup/redis/test.tar.gz")

    # Test restore on redis (required redis to be down)
    files = os.listdir("data/backup/redis")
    files = [f for f in files if f.endswith(".tar.gz")]
    files.sort()
    redis_dump_file = files[-1]

    exec_command(capfd, "remove redis")
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore redis {redis_dump_file}",
        "Starting restore on redis...",
        f"Restore from data/backup/redis/{redis_dump_file} completed",
    )

    exec_command(capfd, "start", "Stack started")

    # 4) verify data match again point 1 (restore completed)
    # postponed because redis needs time to start...

    exec_command(
        capfd,
        f"restore redis {redis_dump_file}",
        "Redis is running and the restore will temporary stop it.",
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        f"restore redis {redis_dump_file} --force --restart backend",
        "Starting restore on redis...",
        f"Restore from data/backup/redis/{redis_dump_file} completed",
    )

    # Wait redis to completely startup
    service_verify(capfd, "redis")

    exec_command(capfd, get_key, value1)
def test_all(capfd: Capture, faker: Faker) -> None:
    """Verify mariadb backup and restore: backup on running service,
    retention (--max, --dry-run, pattern filtering), restore error paths and
    a full round-trip checking that restored data matches the backup."""
    execute_outside(capfd, "backup mariadb")
    execute_outside(capfd, "restore mariadb")

    backup_folder = BACKUP_DIR.joinpath("mariadb")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="mysql",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup mariadb",
        f"image, execute {colors.RED}rapydo pull mariadb",
    )
    exec_command(
        capfd,
        "restore mariadb",
        f"image, execute {colors.RED}rapydo pull mariadb",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "sqlalchemy")

    # This will initialize mariadb
    exec_command(capfd, "shell backend 'restapi init'")

    def exec_query(query: str) -> str:
        # Build a "rapydo shell mariadb" command that runs *query* via the
        # mysql client, with the required levels of quote nesting/escaping
        command = 'shell mariadb "'
        command += 'sh -c \'mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -D"$MYSQL_DATABASE" '
        command += f'-e \\"{query};\\"'
        # This is to close the sh -c 'command'
        command += "'"
        # This is to close the shell "command"
        command += '"'

        return command

    # Verify the initialization
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tUser",
    )

    exec_command(
        capfd,
        "backup mariadb",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # A second backup is needed to test backup retention
    exec_command(
        capfd,
        "backup mariadb",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup mariadb --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup mariadb --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    exec_command(
        capfd,
        "backup mariadb --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )
    # Verify that due to dry run, no backup is executed
    exec_command(
        capfd,
        "backup mariadb --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Create an additional backup to the test deletion (now backups are 3)
    exec_command(
        capfd,
        "backup mariadb",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup mariadb --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.tar").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.tar.gz").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup mariadb --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finding 2, all files above are ignored because not matching the pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup mariadb",
        "The backup procedure requires mariadb running, please start your stack",
    )

    exec_command(
        capfd,
        "restore mariadb",
        "Please specify one of the following backup:",
        ".tar.gz",
    )
    exec_command(
        capfd,
        "restore mariadb invalid",
        "Invalid backup file, data/backup/mariadb/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore mariadb",
            "No backup found, the following folder "
            "does not exist: data/backup/mariadb",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore mariadb",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

        os.mkdir("data/backup/mariadb")
        exec_command(
            capfd,
            "restore mariadb",
            "No backup found, data/backup/mariadb is empty",
        )

        open("data/backup/mariadb/test.tar.gz", "a").close()
        exec_command(
            capfd,
            "restore mariadb",
            "Please specify one of the following backup:",
            "test.tar.gz",
        )

        os.remove("data/backup/mariadb/test.tar.gz")

    files = os.listdir("data/backup/mariadb")
    files = [f for f in files if f.endswith(".tar.gz")]
    files.sort()
    mariadb_dump_file = files[-1]

    exec_command(capfd, "start", "Stack started")

    # MariaDB restore is not allowed while the container is running (without --force)
    exec_command(
        capfd,
        f"restore mariadb {mariadb_dump_file}",
        "MariaDB is running and the restore will temporary stop it. "
        "If you want to continue add --force flag",
    )

    # Here we test the restore procedure:
    # 1) verify some data in the database
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tUser",
    )
    # 2) Modify the data
    exec_command(
        capfd,
        exec_query("update role SET description=name"),
    )
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tnormal_user",
    )
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore mariadb {mariadb_dump_file} --force",
        "Starting restore on mariadb...",
        "Opening backup file",
        "Removing current datadir",
        "Restoring the backup",
        "...done",
        "completed OK!",
        "Removing the temporary uncompressed folder",
        f"Restore from data/backup/mariadb/{mariadb_dump_file} completed",
    )

    if Configuration.swarm_mode:
        time.sleep(5)

    # 4) verify data match again point 1 (restore completed)
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tUser",
    )
def test_scale(capfd: Capture) -> None:
    """Verify the scale command in swarm and compose mode: invalid replica
    values, explicit N, DEFAULT_SCALE_* from env/.projectrc, convergence
    timing and services that do not support scaling."""
    execute_outside(capfd, "scale x=1")

    create_project(
        capfd=capfd,
        name="first",
        auth="postgres",
        frontend="no",
        services=["redis"],
    )
    init_project(capfd)

    # backend, postgres, redis
    BASE_SERVICE_NUM = 3

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "scale backend=2",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        start_registry(capfd)
        # Add the registry
        BASE_SERVICE_NUM += 1

    exec_command(
        capfd,
        "scale backend=2",
        f"image, execute {colors.RED}rapydo pull backend",
    )

    pull_images(capfd)

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "scale backend=2",
            "No such service: first_backend, have you started your stack?",
        )

    start_project(capfd)

    assert count_running_containers() == BASE_SERVICE_NUM

    exec_command(
        capfd,
        "scale redis=x",
        "Invalid number of replicas: x",
    )

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "scale backend=2 --wait",
            "first_backend scaled to 2",
            "Service converged",
        )

        assert count_running_containers() == BASE_SERVICE_NUM + 1

        exec_command(
            capfd,
            "status",
            " [2]",
        )

        # Without =N the scale target falls back to the default (1)
        exec_command(
            capfd,
            "scale backend",
            "first_backend scaled to 1",
        )

        # The backend instances are still 2 because the service is not converged yet
        # (--wait flag was not included in the previous command)
        assert count_running_containers() == BASE_SERVICE_NUM + 1

        # So just sleep for a while to let the service to converge
        time.sleep(3)

        assert count_running_containers() == BASE_SERVICE_NUM

        exec_command(
            capfd,
            "-e DEFAULT_SCALE_BACKEND=3 scale backend --wait",
            "first_backend scaled to 3",
            "Service converged",
        )

        assert count_running_containers() == BASE_SERVICE_NUM + 2

        exec_command(
            capfd,
            "status",
            " [3]",
        )

        with open(".projectrc", "a") as f:
            f.write("\n DEFAULT_SCALE_BACKEND: 4\n")

        exec_command(
            capfd,
            "scale backend",
            "first_backend scaled to 4",
        )

        # Just wait for a while for all tasks to start, necessary because the previous
        # command did not include --wait flag
        time.sleep(2)

        assert count_running_containers() == BASE_SERVICE_NUM + 3

        # This should restart all the replicas.
        exec_command(
            capfd,
            "start",
        )

        # Verify that 2 replicas are still running after the restart
        exec_command(
            capfd,
            "start --force",
        )

        # Just wait for a while for all tasks to start, necessary because the previous
        # command did not include --wait flag
        time.sleep(2)

        # Still not working
        # assert count_running_containers() == BASE_SERVICE_NUM + 3

        # exec_command(
        #     capfd,
        #     "scale backend=0 --wait",
        #     "first_backend scaled to 0",
        # )
        # assert count_running_containers() == BASE_SERVICE_NUM - 1

        exec_command(
            capfd,
            "scale redis=2",
            "Service redis is not guaranteed to support the scale, "
            "can't accept the request",
        )
    else:
        exec_command(
            capfd,
            "scale redis",
            "Scaling services: redis=1...",
            "Services scaled: redis=1",
        )

        assert count_running_containers() == BASE_SERVICE_NUM

        exec_command(
            capfd,
            "-e DEFAULT_SCALE_REDIS=2 scale redis",
            "Scaling services: redis=2...",
            "Services scaled: redis=2",
        )

        assert count_running_containers() == BASE_SERVICE_NUM + 1

        exec_command(
            capfd,
            "scale redis=3",
            "Scaling services: redis=3...",
            "Services scaled: redis=3",
        )

        assert count_running_containers() == BASE_SERVICE_NUM + 2

        with open(".projectrc", "a") as f:
            f.write("\n DEFAULT_SCALE_REDIS: 4\n")

        exec_command(
            capfd,
            "scale redis",
            "Scaling services: redis=4...",
            "Services scaled: redis=4",
        )

        assert count_running_containers() == BASE_SERVICE_NUM + 3

        exec_command(
            capfd,
            "scale redis=1",
            "Scaling services: redis=1...",
            "Services scaled: redis=1",
        )

        assert count_running_containers() == BASE_SERVICE_NUM

        exec_command(
            capfd,
            "scale redis=2",
            "Scaling services: redis=2...",
            "Services scaled: redis=2",
        )

        assert count_running_containers() == BASE_SERVICE_NUM + 1

        # This should restart all the replicas.
        exec_command(
            capfd,
            "start",
        )

        # Verify that 2 replicas are still running after the restart
        exec_command(
            capfd,
            "start --force",
        )
def test_remove(capfd: Capture) -> None:
    """Verify the remove command: single-service vs full-stack removal,
    network and volume cleanup differences between swarm and compose mode,
    the --all flag, registry removal and interface containers."""
    execute_outside(capfd, "remove")

    create_project(
        capfd=capfd,
        name="rem",
        auth="postgres",
        frontend="no",
    )
    init_project(capfd, " -e HEALTHCHECK_INTERVAL=20s ")
    start_registry(capfd)

    pull_images(capfd)

    if Configuration.swarm_mode:
        # In swarm mode single service remove is not permitted if nothing is running
        exec_command(
            capfd,
            "remove postgres",
            f"Stack rem is not running, deploy it with {colors.RED}rapydo start",
        )

    # Even if nothing is running, remove is permitted both on Compose and Swarm
    exec_command(capfd, "remove", "Stack removed")

    NONE: List[str] = []
    if Configuration.swarm_mode:
        BACKEND_ONLY = ["rem_backend"]
        ALL = ["rem_backend", "rem_postgres"]
    else:
        BACKEND_ONLY = ["rem-backend"]
        ALL = ["rem-backend", "rem-postgres"]

    assert get_containers() == NONE

    start_project(capfd)

    if Configuration.swarm_mode:
        NETWORK_NAME = "rem_swarm_default"
    else:
        NETWORK_NAME = "rem_compose_default"

    assert get_containers() == ALL

    NAMED_VOLUMES_NUM, UNNAMED_VOLUMES_NUM = count_volumes()

    if Configuration.swarm_mode:
        # In swarm mode remove single service is equivalent to scale 0
        exec_command(
            capfd,
            "remove postgres",
            "rem_postgres scaled to 0",
            "verify: Service converged",
            "Services removed",
        )

        assert get_containers() == BACKEND_ONLY
        # Single service remove does not remove the network
        assert NETWORK_NAME in get_networks()

        # Single service remove also remove unnamed volumes
        time.sleep(2)
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        assert UNNAMED_VOLUMES_NUM > u

        exec_command(
            capfd,
            "start",
            "Stack started",
        )

        time.sleep(2)

        assert get_containers() == ALL

        NAMED_VOLUMES_NUM, UNNAMED_VOLUMES_NUM = count_volumes()

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        assert get_containers() == NONE
        # Removal of all services also drop the network
        assert NETWORK_NAME not in get_networks()

        # Removal of all services also remove unnamed volumes
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        assert UNNAMED_VOLUMES_NUM > u
    else:
        exec_command(
            capfd,
            "remove postgres",
            "Stack removed",
        )

        assert get_containers() == BACKEND_ONLY
        # Single service remove does not remove the network
        assert NETWORK_NAME in get_networks()

        # Removal of all services does not remove any volume
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        assert UNNAMED_VOLUMES_NUM == u

        exec_command(
            capfd,
            "remove",
            "Stack removed",
        )

        assert get_containers() == NONE
        # Removal of all services also drop the network
        # assert NETWORK_NAME not in get_networks()
        # Networks are not removed, but based on docker compose down --help they should
        # Also docker-compose down removes network from what I remember
        # Should be reported as bug? If corrected this check will start to fail
        assert NETWORK_NAME in get_networks()

        # Removal of all services does not remove any volume
        n, u = count_volumes()
        assert NAMED_VOLUMES_NUM == n
        assert UNNAMED_VOLUMES_NUM == u

    start_project(capfd)

    assert get_containers() == ALL

    exec_command(
        capfd,
        "remove --all postgres",
        "Stack removed",
    )

    assert get_containers() == BACKEND_ONLY

    # Removal of all services with --all flag remove unnamed volumes
    n, u = count_volumes()
    assert NAMED_VOLUMES_NUM == n

    # This locally works... but not on GA ... mystery
    # assert UNNAMED_VOLUMES_NUM > u

    # New counts, after single service --all has removed some unnamed volume
    NAMED_VOLUMES_NUM, UNNAMED_VOLUMES_NUM = count_volumes()

    exec_command(capfd, "remove --all", "Stack removed")

    assert get_containers() == NONE

    n, u = count_volumes()
    assert NAMED_VOLUMES_NUM > n
    assert UNNAMED_VOLUMES_NUM > u

    if Configuration.swarm_mode:
        # Remove the registry
        exec_command(
            capfd,
            "remove registry",
            "Service registry removed",
        )

        # Verify that the registry is no longer running
        exec_command(
            capfd,
            "start",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        exec_command(
            capfd,
            "remove registry",
            "Service registry is not running",
        )

        # Mix both registry and normal services
        exec_command(
            capfd,
            "remove registry postgres",
            # Registry is already removed, can't remove it again
            # But this is enough to confirm that registry and services can be mixed up
            "Service registry is not running",
            # The main stack is already removed, can't remove postgres
            # But this is enough to confirm that registry and services can be mixed up
            "Stack rem is not running, deploy it with",
        )

        start_registry(capfd)

    exec_command(
        capfd,
        "run --detach --pull --port 7777 adminer",
        "You can access Adminer interface",
    )

    exec_command(
        capfd,
        "run --detach --pull --port 8888 swaggerui",
        "You can access SwaggerUI web page",
    )

    exec_command(
        capfd,
        "remove adminer postgres swaggerui",
        "Service adminer removed",
        "Service swaggerui removed",
    )

    exec_command(
        capfd,
        "remove adminer postgres swaggerui",
        "Service adminer is not running",
        "Service swaggerui is not running",
    )

    assert get_containers() == NONE

    # Verify that removal of interfaces does not stop the main stack, if not requested
    exec_command(capfd, "start backend", "Stack started")
    time.sleep(2)
    assert get_containers() == BACKEND_ONLY

    exec_command(capfd, "remove adminer", "Service adminer is not running")
    assert get_containers() == BACKEND_ONLY

    exec_command(capfd, "remove", "Stack removed")
def _replace_in_file(path: str, old: str, new: str) -> None:
    # Helper: swap a literal substring inside a text file. Used below to tweak
    # the project's required rapydo version. Uses pathlib read/write so the
    # file handle is never leaked (the original code used bare open/close).
    p = Path(path)
    p.write_text(p.read_text().replace(old, new))


def test_install(capfd: Capture, faker: Faker) -> None:
    """Exercise `rapydo install` and the controller-update paths of `rapydo update`.

    Covers: install outside a project, missing submodule, invalid versions,
    docker/compose/buildx sub-installs, branch switching, symlinked submodules,
    and project/controller version compatibility messages.
    """
    execute_outside(capfd, "install")

    project = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project,
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)

    # Initially the controller is installed from pip
    exec_command(
        capfd,
        "update -i main",
        "Controller not updated because it is installed outside this project",
        "Installation path is ",
        ", the current folder is ",
        "All updated",
    )

    with TemporaryRemovePath(SUBMODULES_DIR.joinpath("do")):
        exec_command(
            capfd,
            "install",
            "missing as submodules/do. You should init your project",
        )

    exec_command(capfd, "install 100.0", "Invalid version")

    exec_command(capfd, "install docker", "Docker current version:", "Docker installed version:")
    exec_command(capfd, "install compose", "Docker compose is installed")
    exec_command(
        capfd,
        "install buildx",
        "Docker buildx current version:",
        "Docker buildx installed version:",
    )
    exec_command(capfd, "install auto")

    r = git.get_repo("submodules/do")
    git.switch_branch(r, "0.7.6")
    exec_command(
        capfd,
        "install",
        f"Controller repository switched to {__version__}",
    )

    # Here the controller is installed in editable mode from the correct submodules
    # folder (this is exactly the default normal condition)
    exec_command(
        capfd,
        "update -i main",
        # Controller installed from {} and updated
        "Controller installed from ",
        " and updated",
        "All updated",
    )

    # Install the controller from a linked folder to verify that the post-update checks
    # are able to correctly resolve symlinks
    # ###########################################################
    # Copied from test_init_check_update.py from here...
    SUBMODULES_DIR.rename("submodules.bak")
    SUBMODULES_DIR.mkdir()

    # This is to re-fill the submodules folder,
    # these folder will be removed by the next init
    exec_command(capfd, "init", "Project initialized")

    modules_path = Path("submodules.bak").resolve()

    exec_command(
        capfd,
        f"init --submodules-path {modules_path}",
        "Path submodules/http-api already exists, removing",
        "Project initialized",
    )
    # ... to here
    # ###########################################################

    exec_command(
        capfd,
        "update -i main",
        # Controller installed from {} and updated
        "Controller installed from ",
        " and updated",
        "All updated",
    )

    # This test will change the required version
    pconf = f"projects/{project}/project_configuration.yaml"

    # Downgrade the required rapydo version in the project configuration
    _replace_in_file(pconf, f'rapydo: "{__version__}"', 'rapydo: "0.7.6"')

    exec_command(
        capfd,
        "version",
        f"This project is not compatible with rapydo version {__version__}",
        "Please downgrade rapydo to version 0.7.6 or modify this project",
    )

    # Now bump the required rapydo version above the current one
    _replace_in_file(pconf, 'rapydo: "0.7.6"', 'rapydo: "99.99.99"')

    exec_command(
        capfd,
        "version",
        f"This project is not compatible with rapydo version {__version__}",
        "Please upgrade rapydo to version 99.99.99 or modify this project",
    )

    exec_command(capfd, "install --no-editable 0.8")

    exec_command(capfd, "install --no-editable")

    exec_command(capfd, "install")
def test_init(capfd: Capture, faker: Faker) -> None:
    """Exercise `rapydo init`: swarm initialization, submodule branch
    alignment, linked submodules via --submodules-path, and .projectrc
    creation in production mode.

    NOTE(review): heavily order-dependent — each exec_command mutates the
    on-disk project state that the next one asserts against.
    """
    execute_outside(capfd, "init")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="postgres",
        frontend="no",
    )

    # Before init the submodules are missing, so check must fail
    exec_command(
        capfd,
        "check -i main",
        "Repo https://github.com/rapydo/http-api.git missing as submodules/http-api.",
        "You should init your project",
    )

    if Configuration.swarm_mode:
        # Explicit manager address: init must use 127.0.0.1
        exec_command(
            capfd,
            "-e HEALTHCHECK_INTERVAL=1s -e SWARM_MANAGER_ADDRESS=127.0.0.1 init",
            "docker compose is installed",
            "Initializing Swarm with manager IP 127.0.0.1",
            "Swarm is now initialized",
            "Project initialized",
        )

        docker = Docker()
        docker.client.swarm.leave(force=True)

        # Empty manager address: init must fall back to the detected local IP
        local_ip = system.get_local_ip(production=False)
        exec_command(
            capfd,
            "-e HEALTHCHECK_INTERVAL=1s -e SWARM_MANAGER_ADDRESS= init",
            "docker compose is installed",
            "Swarm is now initialized",
            f"Initializing Swarm with manager IP {local_ip}",
            "Project initialized",
        )

        # Re-running init on an initialized swarm is a no-op for the swarm
        exec_command(
            capfd,
            "init",
            "Swarm is already initialized",
            "Project initialized",
        )
    else:
        init_project(capfd)

    # init must realign submodules left on a wrong branch
    repo = git.get_repo("submodules/http-api")
    git.switch_branch(repo, "0.7.6")
    exec_command(
        capfd,
        "init",
        f"Switched http-api branch from 0.7.6 to {__version__}",
        f"build-templates already set on branch {__version__}",
        f"do already set on branch {__version__}",
    )

    os.rename("submodules", "submodules.bak")
    os.mkdir("submodules")

    # This is to re-fill the submodules folder,
    # these folder will be removed by the next init
    exec_command(capfd, "init", "Project initialized")

    modules_path = Path("submodules.bak").resolve()

    # --submodules-path requires every submodule to exist in the given folder
    with TemporaryRemovePath(Path("submodules.bak/do")):
        exec_command(
            capfd,
            f"init --submodules-path {modules_path}",
            "Submodule do not found in ",
        )
    exec_command(
        capfd,
        f"init --submodules-path {modules_path}",
        "Path submodules/http-api already exists, removing",
        "Project initialized",
    )

    # --submodules-path must create symlinks into the external folder
    assert os.path.islink("submodules/do")
    assert not os.path.islink("submodules.bak/do")

    # Init again, this time in submodules there are links...
    # and will be removed as well as the folders
    exec_command(
        capfd,
        f"init --submodules-path {modules_path}",
        "Path submodules/http-api already exists, removing",
        "Project initialized",
    )

    exec_command(
        capfd,
        "init --submodules-path invalid/path",
        "Local path not found: invalid/path",
    )

    exec_command(
        capfd,
        "--prod init -f",
        "Created default .projectrc file",
        "Project initialized",
    )

    # -e variables passed at init time must be persisted into .projectrc
    exec_command(
        capfd,
        "--prod -e MYVAR=MYVAL init -f",
        "Created default .projectrc file",
        "Project initialized",
    )

    with open(".projectrc") as projectrc:
        lines = [line.strip() for line in projectrc.readlines()]
        assert "MYVAR: MYVAL" in lines
def test_all(capfd: Capture) -> None:
    """Exercise `rapydo logs`: service selection, --tail, --follow (interrupted
    via SIGALRM), and swarm-mode restrictions on following multiple services.
    """
    execute_outside(capfd, "logs backend")

    create_project(
        capfd=capfd,
        name="first",
        auth="postgres",
        frontend="angular",
    )
    init_project(capfd)
    start_registry(capfd)

    pull_images(capfd)
    start_project(capfd)

    # Invalid services are refused
    exec_command(
        capfd,
        "logs --tail 1 invalid",
        "No such service: invalid",
    )

    now = datetime.now()
    # SIGALRM delivers a fake KeyboardInterrupt after 5s to break out of --follow
    signal.signal(signal.SIGALRM, mock_KeyboardInterrupt)
    signal.alarm(5)
    # Here using main services option
    exec_command(
        capfd,
        "logs --tail 10 --follow backend",
        "REST API backend server is ready to be launched",
    )
    end = datetime.now()

    # --follow must have blocked until the alarm fired (~5s, allow 1s slack)
    assert (end - now).seconds >= 4

    signal.alarm(0)

    exec_command(
        capfd,
        "logs backend",
        "REST API backend server is ready to be launched",
    )

    exec_command(
        capfd,
        "logs --tail 1",
        "Enabled services: backend, frontend, postgres",
    )

    exec_command(
        capfd,
        "logs --tail 1 backend",
        "Enabled services: backend",
    )

    exec_command(
        capfd,
        "logs --tail 1 frontend",
        "Enabled services: frontend",
    )

    # Service order on the command line does not affect the enabled-services list
    exec_command(
        capfd,
        "logs --tail 1 backend frontend",
        "Enabled services: backend, frontend",
    )

    exec_command(
        capfd,
        "logs --tail 1 frontend backend",
        "Enabled services: backend, frontend",
    )

    exec_command(
        capfd,
        "logs --tail 1 backend invalid",
        "No such service: invalid",
    )

    # Backend logs are never timestamped
    exec_command(
        capfd,
        "logs --tail 20 backend",
        # Logs are not prefixed because only one service is shown
        "Testing mode",
    )

    # Debug code... no logs in swarm mode for frontend, even after a wait 20...
    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "logs --tail 10 frontend",
        )
    else:
        timestamp = now.strftime("%Y-%m-%dT")
        # Frontend logs are always timestamped
        exec_command(
            capfd,
            "logs --tail 10 frontend",
            # Logs are not prefixed because only one service is shown
            f"{timestamp}",
        )

    # Follow flag is not supported in swarm mode with multiple services
    if Configuration.swarm_mode:
        # Multiple services are not supported in swarm mode
        exec_command(
            capfd,
            "logs --follow",
            "Follow flag is not supported on multiple services",
        )

        exec_command(
            capfd,
            "logs --follow backend frontend",
            "Follow flag is not supported on multiple services",
        )
def test_base(capfd: Capture) -> None:
    """Exercise `rapydo check`: wrong submodule branches, missing folders/files,
    obsolete images, project-name validation, dirty repos, password strength,
    and (in swarm mode) cluster state and resource over-allocation warnings.

    Fix applied: the dirty-repo marker file is now written via a context
    manager instead of bare open/close, so the handle cannot leak.
    """
    execute_outside(capfd, "check")

    create_project(
        capfd=capfd,
        name="third",
        auth="postgres",
        frontend="angular",
    )
    init_project(capfd)

    repo = git.get_repo("submodules/http-api")
    git.switch_branch(repo, "0.7.6")

    exec_command(
        capfd,
        "check -i main",
        f"http-api: wrong branch 0.7.6, expected {__version__}",
        f"You can fix it with {colors.RED}rapydo init{colors.RESET}",
    )
    init_project(capfd)

    with TemporaryRemovePath(DATA_DIR):
        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            "Folder not found: data",
            "Please note that this command only works from inside a rapydo-like repo",
            "Verify that you are in the right folder, now you are in: ",
        )

    with TemporaryRemovePath(Path("projects/third/builds")):
        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            "Project third is invalid: required folder not found projects/third/builds",
        )

    with TemporaryRemovePath(Path(".gitignore")):
        exec_command(
            capfd,
            "check -i main --no-git --no-builds",
            "Project third is invalid: required file not found .gitignore",
        )

    # Add a custom image to extend base backend image:
    with open("projects/third/confs/commons.yml", "a") as f:
        f.write(
            """
services:
  backend:
    build: ${PROJECT_DIR}/builds/backend
    image: third/backend:${RAPYDO_VERSION}
"""
        )

    os.makedirs("projects/third/builds/backend")
    with open("projects/third/builds/backend/Dockerfile", "w+") as f:
        f.write(
            f"""
FROM rapydo/backend:{__version__}
RUN mkdir xyz
"""
        )

    # Skipping main because we are on a fake git repository
    exec_command(
        capfd,
        "check -i main",
        f" image, execute {colors.RED}rapydo pull",
        f" image, execute {colors.RED}rapydo build",
        f"Compose is installed with version {COMPOSE_VERSION}",
        f"Buildx is installed with version {BUILDX_VERSION}",
        "Checks completed",
    )

    exec_command(
        capfd,
        "--stack invalid check -i main",
        "Failed to read projects/third/confs/invalid.yml: File does not exist",
    )

    os.mkdir("submodules/rapydo-confs")
    exec_command(
        capfd,
        "check -i main --no-git --no-builds",
        "Project third contains an obsolete file or folder: submodules/rapydo-confs",
    )

    shutil.rmtree("submodules/rapydo-confs")

    # Test selection with two projects
    create_project(
        capfd=capfd,
        name="justanother",
        auth="postgres",
        frontend="no",
    )

    os.remove(".projectrc")

    exec_command(
        capfd,
        "check -i main --no-git --no-builds",
        "Multiple projects found, please use --project to specify one of the following",
    )

    # Test with zero projects
    with TemporaryRemovePath(Path("projects")):
        os.mkdir("projects")
        # in this case SystemExit is raised in the command init...
        with pytest.raises(SystemExit):
            exec_command(
                capfd,
                "check -i main --no-git --no-builds",
                "No project found (is projects folder empty?)",
            )
        shutil.rmtree("projects")

    exec_command(
        capfd,
        "-p third check -i main --no-git --no-builds",
        "Checks completed",
    )

    # Numbers are not allowed as first characters
    pname = "2invalidcharacter"
    os.makedirs(f"projects/{pname}")
    exec_command(
        capfd,
        f"-p {pname} check -i main --no-git --no-builds",
        "Wrong project name, found invalid characters: 2",
    )
    shutil.rmtree(f"projects/{pname}")

    invalid_characters = {
        "_": "_",
        "-": "-",
        "C": "C",
        # Invalid characters in output are ordered
        # Numbers are allowed if not leading
        "_C-2": "-C_",
    }
    # Check invalid and reserved project names
    for invalid_key, invalid_value in invalid_characters.items():
        pname = f"invalid{invalid_key}character"
        os.makedirs(f"projects/{pname}")
        exec_command(
            capfd,
            f"-p {pname} check -i main --no-git --no-builds",
            f"Wrong project name, found invalid characters: {invalid_value}",
        )
        shutil.rmtree(f"projects/{pname}")

    os.makedirs("projects/celery")
    exec_command(
        capfd,
        "-p celery check -i main --no-git --no-builds",
        "You selected a reserved name, invalid project name: celery",
    )
    shutil.rmtree("projects/celery")

    exec_command(
        capfd,
        "-p fourth check -i main --no-git --no-builds",
        "Wrong project fourth",
        "Select one of the following: ",
    )

    # Test init of data folders
    shutil.rmtree(LOGS_FOLDER)
    assert not LOGS_FOLDER.is_dir()
    # Let's restore .projectrc and data/logs
    init_project(capfd, "--project third")
    assert LOGS_FOLDER.is_dir()

    exec_command(
        capfd,
        "check -i main --no-git --no-builds",
        "Checks completed",
    )

    # Test dirty repo: an untracked file in the do submodule must be reported.
    # (fix: use a context manager instead of manual open/close)
    with open("submodules/do/new_file", "wt+") as fin:
        fin.write("xyz")
    exec_command(
        capfd,
        "check -i main",
        "You have unstaged files on do",
        "Untracked files:",
        "submodules/do/new_file",
    )

    with open(".gitattributes", "a") as a_file:
        a_file.write("\n")
        a_file.write("# new line")

    exec_command(
        capfd,
        "check -i main",
        ".gitattributes changed, "
        f"please execute {colors.RED}rapydo upgrade --path .gitattributes",
    )

    exec_command(
        capfd,
        "--prod check -i main --no-git --no-builds",
        "The following variables are missing in your configuration",
        "You can fix this error by updating your .projectrc file",
    )

    # Default ALCHEMY_PASSWORD has as score of 2
    exec_command(
        capfd,
        "-e MIN_PASSWORD_SCORE=3 check -i main --no-git --no-builds",
        "The password used in ALCHEMY_PASSWORD is weak",
    )
    exec_command(
        capfd,
        "-e MIN_PASSWORD_SCORE=4 check -i main --no-git --no-builds",
        "The password used in ALCHEMY_PASSWORD is very weak",
    )
    exec_command(
        capfd,
        "-e MIN_PASSWORD_SCORE=4 -e AUTH_DEFAULT_PASSWORD=x check -i main --no-git --no-builds",
        "The password used in AUTH_DEFAULT_PASSWORD is extremely weak",
    )

    exec_command(
        capfd,
        "--prod init -f",
        "Created default .projectrc file",
        "Project initialized",
    )

    exec_command(
        capfd,
        "--prod check -i main --no-git --no-builds",
        "Checks completed",
    )

    if Configuration.swarm_mode:
        # Skipping main because we are on a fake git repository
        exec_command(
            capfd,
            "check -i main",
            "Swarm is correctly initialized",
            "Checks completed",
        )

        docker = Docker()
        docker.client.swarm.leave(force=True)

        exec_command(
            capfd,
            "check -i main",
            f"Swarm is not initialized, please execute {colors.RED}rapydo init",
        )
        exec_command(
            capfd,
            "init",
            "Swarm is now initialized",
            "Project initialized",
        )
        exec_command(
            capfd,
            "check -i main",
            "Swarm is correctly initialized",
            "Checks completed",
        )

        check = "check -i main --no-git --no-builds"

        exec_command(
            capfd,
            f"-e ASSIGNED_MEMORY_BACKEND=50G {check}",
            "Your deployment requires 50GB of RAM but your nodes only have",
            # The error does not halt the checks execution
            "Checks completed",
        )

        exec_command(
            capfd,
            f"-e ASSIGNED_CPU_BACKEND=50.0 {check}",
            "Your deployment requires ",
            " cpus but your nodes only have ",
            # The error does not halt the checks execution
            "Checks completed",
        )

        exec_command(
            capfd,
            f"-e DEFAULT_SCALE_BACKEND=55 -e ASSIGNED_MEMORY_BACKEND=1G {check}",
            "Your deployment requires 55GB of RAM but your nodes only have",
            # The error does not halt the checks execution
            "Checks completed",
        )

        exec_command(
            capfd,
            f"-e DEFAULT_SCALE_BACKEND=50 -e ASSIGNED_CPU_BACKEND=1.0 {check}",
            "Your deployment requires ",
            " cpus but your nodes only have ",
            # The error does not halt the checks execution
            "Checks completed",
        )
def test_all(capfd: Capture, faker: Faker) -> None:
    """Exercise `rapydo pull` and `rapydo build`: missing images, invalid
    services, custom Dockerfile validation, obsolete-image detection after
    core rebuilds, and pull of images defined only in the custom config.

    Fixes applied: removed a dead local (`image = f"testbuild/rabbit:$...`)
    whose f-string also had a `${` typo rendering a literal `$` in the tag;
    the Dockerfile append now uses a context manager instead of open/close.
    """
    execute_outside(capfd, "pull")
    execute_outside(capfd, "build")

    project2 = random_project_name(faker)
    create_project(
        capfd=capfd,
        name="testbuild",
        auth="no",
        frontend="no",
        services=["rabbit"],
    )
    init_project(capfd)
    create_project(
        capfd=capfd,
        name=project2,
        auth="no",
        frontend="no",
        services=["rabbit"],
    )

    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "pull",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        exec_command(
            capfd,
            "build",
            "docker buildx is installed",
            "Registry 127.0.0.1:5000 not reachable.",
        )

        start_registry(capfd)

    image = f"rapydo/backend:{__version__}"
    exec_command(
        capfd,
        "start",
        f"Missing {image} image, execute {colors.RED}rapydo pull backend",
    )

    exec_command(
        capfd,
        "-e ACTIVATE_RABBIT=0 pull --quiet rabbit",
        "No such service: rabbit",
    )

    exec_command(
        capfd,
        "pull --quiet proxy",
        "No such service: proxy",
    )

    exec_command(
        capfd,
        "pull --quiet",
        "Base images pulled from docker hub",
    )

    # Basic pull
    exec_command(
        capfd,
        "pull xxx",
        "No such service: xxx",
    )

    # --all is useless here... added just to include the parameter in some tests.
    # A true test on such parameter would be quite complex...
    exec_command(
        capfd,
        "pull --all --quiet backend",
        "Images pulled from docker hub",
    )

    # Add a custom image to extend base rabbit image:
    with open("projects/testbuild/confs/commons.yml", "a") as f:
        f.write(
            """
services:
  rabbit:
    build: ${PROJECT_DIR}/builds/rabbit
    image: testbuild/rabbit:${RAPYDO_VERSION}
"""
        )

    # Missing folder
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Build path not found",
    )

    os.makedirs("projects/testbuild/builds/rabbit")

    # Missing Dockerfile
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Build path not found: ",
        "projects/testbuild/builds/rabbit/Dockerfile",
    )

    # Empty Dockerfile
    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        pass
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Invalid Dockerfile, no base image found in ",
        "projects/testbuild/builds/rabbit/Dockerfile",
    )

    # Missing base image
    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        f.write("RUN ls")
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Invalid Dockerfile, no base image found in ",
        "projects/testbuild/builds/rabbit/Dockerfile",
    )

    # Invalid RAPyDo template
    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        f.write("FROM rapydo/invalid")
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Unable to find rapydo/invalid in this project",
        "Please inspect the FROM image in",
        "projects/testbuild/builds/rabbit/Dockerfile",
    )

    # The custom rabbit image is not built yet: start must suggest `build rabbit`.
    # Only a partial match on the message is asserted here.
    exec_command(
        capfd,
        "start",
        f" image, execute {colors.RED}rapydo build rabbit",
    )

    # Not a RAPyDo child but build is possibile
    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        f.write("FROM ubuntu")
    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        "Custom images built",
    )

    with open("projects/testbuild/builds/rabbit/Dockerfile", "w+") as f:
        f.write(
            f"""
FROM rapydo/rabbitmq:{__version__}
# Just a simple command to differentiate from the parent
RUN mkdir xyz
"""
        )

    r = Repo(".")
    r.git.add("-A")
    r.git.commit("-a", "-m", "'fake'")

    exec_command(
        capfd,
        "build rabbit",
        "docker buildx is installed",
        f"naming to docker.io/testbuild/rabbit:{__version__}",
        "Custom images built",
    )

    test_file = Path("projects/testbuild/builds/rabbit/test")
    with open(test_file, "w+") as f:
        f.write("test")

    exec_command(
        capfd,
        "check -i main --no-git",
        "Can't retrieve a commit history for ",
        "Checks completed",
    )

    test_file.unlink()

    exec_command(
        capfd,
        f"-e ACTIVATE_RABBIT=0 -p {project2} build --core rabbit",
        "No such service: rabbit",
    )

    # Rebuild core rabbit image => custom rabbit is now obsolete
    # Please note the use of the project 2.
    # This way we prevent to rebuilt the custom image of testbuild
    # This simulate a pull updating a core image making the custom image obsolete
    if Configuration.swarm_mode:
        swarm_push_warn = "Local registry push is not implemented yet for core images"
    else:
        swarm_push_warn = ""

    exec_command(
        capfd,
        f"-p {project2} build --core rabbit",
        "Core images built",
        swarm_push_warn,
        "No custom images to build",
    )

    exec_command(
        capfd,
        "check -i main --no-git",
        f"Obsolete image testbuild/rabbit:{__version__}",
        "built on ",
        " that changed on ",
        f"Update it with: {colors.RED}rapydo build rabbit",
    )

    # Add a second service with the same image to test redundant builds
    with open("projects/testbuild/confs/commons.yml", "a") as f:
        f.write(
            """
  rabbit2:
    build: ${PROJECT_DIR}/builds/rabbit
    image: testbuild/rabbit:${RAPYDO_VERSION}
"""
        )

    # Touch the core backend template so the pulled image becomes obsolete.
    # (fix: context manager instead of manual open/close)
    with open("submodules/build-templates/backend/Dockerfile", "a") as fin:
        fin.write("xyz")

    r = Repo("submodules/build-templates")
    r.git.commit("-a", "-m", "'fake'")

    exec_command(
        capfd,
        "check -i main",
        f"Obsolete image rapydo/backend:{__version__}",
        "built on ",
        " but changed on ",
        f"Update it with: {colors.RED}rapydo pull backend",
    )

    exec_command(capfd, "remove", "Stack removed")

    # Add a third service without a build to verify that pull includes it
    # to be the base image even if defined in custom part
    with open("projects/testbuild/confs/commons.yml", "a") as f:
        f.write(
            """
  rabbit3:
    image: alpine:latest
    environment:
      ACTIVATE: 1
"""
        )

    exec_command(
        capfd,
        "pull --quiet rabbit3",
        "Base images pulled from docker hub",
    )

    # Now this should fail because pull does not include custom services
    exec_command(
        capfd,
        "start rabbit3",
        "Stack started",
    )
def test_base(capfd: Capture, faker: Faker) -> None:
    """Exercise `rapydo reload`: no-op reloads, invalid services, reload across
    multiple scaled replicas, and the missing-reload-script error path.
    """
    execute_outside(capfd, "reload")

    project_name = random_project_name(faker)
    create_project(
        capfd=capfd,
        name=project_name,
        auth="no",
        frontend="no",
        services=["fail2ban"],
    )
    init_project(capfd)

    # Nothing is running yet: reload is a no-op / rejects unknown services
    exec_command(capfd, "reload", "No service reloaded")
    exec_command(capfd, "reload backend", "No service reloaded")
    exec_command(capfd, "reload invalid", "No such service: invalid")
    exec_command(capfd, "reload backend invalid", "No such service: invalid")

    start_registry(capfd)
    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "reload backend", "Reloading Flask...")

    # Scale the chosen service to 2 replicas; the service used differs per mode
    if Configuration.swarm_mode:
        service = "backend"

        exec_command(
            capfd,
            "start backend",
            "Stack started",
        )

        exec_command(
            capfd,
            "scale backend=2 --wait",
            f"{project_name}_backend scaled to 2",
            "Service converged",
        )
    else:
        service = "fail2ban"
        exec_command(
            capfd,
            "scale fail2ban=2",
            "Scaling services: fail2ban=2...",
            "Services scaled: fail2ban=2",
        )

    # give the new replica time to come up before inspecting containers
    time.sleep(4)

    docker = Docker()
    container1 = docker.get_container(service, slot=1)
    container2 = docker.get_container(service, slot=2)
    assert container1 is not None
    assert container2 is not None
    assert container1 != container2

    # reload must be executed on every replica of the service
    exec_command(
        capfd,
        f"reload {service}",
        f"Executing command on {container1[0]}",
        f"Executing command on {container2[0]}",
    )

    # Remove the reload helper script to trigger the unsupported-service error
    exec_command(capfd, "shell backend -u root 'rm /usr/local/bin/reload'")

    exec_command(
        capfd, "reload backend", "Service backend does not support the reload command"
    )

    exec_command(capfd, "remove", "Stack removed")
def test_add(capfd: Capture) -> None:
    """Exercise `rapydo add` scaffolding (endpoint, task, component, service,
    integration_test, workflow) and the `rapydo upgrade` command.

    Each element follows the same pattern: create (optionally with tests),
    refuse to overwrite without --force, recreate with --force.
    """
    execute_outside(capfd, "add endpoint x")
    execute_outside(capfd, "upgrade --path x")

    create_project(
        capfd=capfd,
        name="second",
        auth="postgres",
        frontend="angular",
    )
    init_project(capfd)

    # --- backend endpoint scaffold ---
    path = Path("projects/second/backend/endpoints/xyz.py")
    test_path = Path("projects/second/backend/tests/test_endpoints_xyz.py")
    assert not path.exists()
    assert not test_path.exists()
    exec_command(
        capfd,
        "add endpoint xyz --add-tests",
        f"Endpoint created: {path}",
        f"Tests scaffold created: {test_path}",
    )
    exec_command(
        capfd,
        "add endpoint xyz",
        f"{path} already exists",
    )
    exec_command(
        capfd,
        "add --force endpoint xyz",
        f"Endpoint created: {path}",
    )
    assert path.is_file()
    assert test_path.is_file()

    # --- celery task scaffold ---
    path = Path("projects/second/backend/tasks/xyz.py")
    assert not path.exists()
    exec_command(
        capfd,
        "add task xyz --add-tests",
        f"Task created: {path}",
        "Tests for tasks not implemented yet",
    )
    exec_command(
        capfd,
        "add task xyz",
        f"{path} already exists",
    )
    exec_command(
        capfd,
        "add --force task xyz",
        f"Task created: {path}",
    )
    assert path.is_file()

    # --- frontend component scaffold ---
    path = Path("projects/second/frontend/app/components/xyz")
    test_path = Path("projects/second/frontend/app/components/xyz/xyz.spec.ts")
    assert not path.exists()
    assert not path.joinpath("xyz.ts").exists()
    assert not path.joinpath("xyz.html").exists()
    exec_command(
        capfd,
        "add component xyz --add-tests",
        "Added import { XyzComponent } from '@app/components/xyz/xyz'; to module ",
        "Added XyzComponent to module declarations",
        f"Component created: {path}",
        f"Tests scaffold created: {test_path}",
    )
    assert path.is_dir()
    assert path.joinpath("xyz.ts").is_file()
    assert path.joinpath("xyz.html").is_file()

    exec_command(
        capfd,
        "add component xyz",
        f"{path}/xyz.ts already exists",
    )
    exec_command(
        capfd,
        "add --force component xyz",
        f"Component created: {path}",
    )

    # Recreating after folder removal keeps the existing module import
    shutil.rmtree(path)
    exec_command(
        capfd,
        "add component xyz",
        "Import already included in module file",
        "Added XyzComponent to module declarations",
        f"Component created: {path}",
    )

    exec_command(
        capfd,
        "add component sink",
        "Added route to module declarations",
        "Added SinkComponent to module declarations",
    )

    # --- frontend service scaffold ---
    path = Path("projects/second/frontend/app/services")
    assert not path.exists()
    assert not path.joinpath("xyz.ts").exists()
    exec_command(
        capfd,
        "add service xyz --add-tests",
        "Added import { XyzService } from '@app/services/xyz'; to module file",
        "Added XyzService to module declarations",
        f"Service created: {path}",
        "Tests for services not implemented yet",
    )
    assert path.is_dir()
    assert path.joinpath("xyz.ts").is_file()
    exec_command(
        capfd,
        "add service xyz",
        f"{path}/xyz.ts already exists",
    )
    exec_command(
        capfd,
        "add --force service xyz",
        f"Service created: {path}",
    )

    path.joinpath("xyz.ts").unlink()
    exec_command(
        capfd,
        "add service xyz",
        "Import already included in module file",
        "Added XyzService to module declarations",
        f"Service created: {path}",
    )

    # --- cypress integration test scaffold ---
    path = Path("projects/second/frontend/integration/app_mypath_my_id.spec.ts")
    assert not path.exists()
    exec_command(
        capfd,
        "add integration_test app/mypath/:my_id --add-tests",
        "Add integration_test does not support --add-tests flag",
    )
    exec_command(
        capfd,
        "add integration_test app/mypath/:my_id",
        f"Integration test created: {path}",
    )
    exec_command(
        capfd,
        "add integration_test app/mypath/:my_id",
        f"{path} already exists",
    )
    # Here a little variant, by adding a leading /
    exec_command(
        capfd,
        "add --force integration_test /app/mypath/:my_id",
        f"Integration test created: {path}",
    )
    assert path.is_file()

    # --- GitHub Actions workflow scaffold ---
    path = Path(".github/workflows/github_actions-backend.yml")
    assert not path.exists()
    exec_command(
        capfd,
        "add workflow unexpectedname",
        "Invalid workflow name, expected: backend, frontend, cypress, mypy",
    )
    exec_command(
        capfd,
        "add workflow backend --add-tests",
        "Add workflow does not support --add-tests flag",
    )
    exec_command(
        capfd,
        "add workflow backend",
        f"GitHub Actions workflow created: {path}",
    )
    exec_command(
        capfd,
        "add workflow backend",
        f"{path} already exists",
    )
    exec_command(
        capfd,
        "add --force workflow backend",
        f"GitHub Actions workflow created: {path}",
    )
    assert path.is_file()

    exec_command(
        capfd,
        "add abc xyz",
        "Invalid value for",
        "'abc' is not one of 'endpoint', 'task', 'component', 'service', ",
    )

    exec_command(capfd, "upgrade")
    exec_command(capfd, "upgrade --path invalid", "Invalid path, cannot upgrade invalid")
    exec_command(capfd, "upgrade --path .gitignore")