import time import pytest from pytest_docker_tools import container from config.login import USERNAME_KEY from sql.models import User, Place, Guide db_container = container( scope='session', image='kartoza/postgis:9.6-2.4', ports={ '5432/tcp': '5432/tcp', }, environment={ 'POSTGRES_USER': '******', 'POSTGRES_PASS': '******', 'POSTGRES_DBNAME': 'testdb', 'ALLOW_IP_RANGE': '0.0.0.0/0', } ) @pytest.fixture(scope='session') def app(): from main import create_app app = create_app({ 'SQLALCHEMY_DATABASE_URI': 'postgresql+pg8000://testuser:testpassword@/testdb', 'SQLALCHEMY_TRACK_MODIFICATIONS': False, 'SECRET_KEY': '=g-R_B?6q/7.(9c#',
from pytest_docker_tools import container, fetch, network from s3fs import S3FileSystem postgres_image = fetch(repository="postgres:12.1-alpine") s3_image = fetch(repository="minio/minio:RELEASE.2019-12-30T05-45-39Z") s3_init_image = fetch(repository="minio/mc:RELEASE.2019-08-29T00-40-57Z") docker_network = network(name="testnetwork") postgres = container( image="{postgres_image.id}", environment={ "POSTGRES_USER": "******", "POSTGRES_PASSWORD": "******" }, ports={"5432/tcp": None}, volumes={ os.path.join(os.path.dirname(__file__), "postgres-init.sql"): { "bind": "/docker-entrypoint-initdb.d/postgres-init.sql" }, os.path.join(os.path.dirname(__file__), "testdata.csv"): { "bind": "/docker-entrypoint-initdb.d/testdata.csv" }, }, ) def test_postgres_to_local_csv(mocker, postgres, tmp_path): mocker.patch.object( PostgresHook, "get_connection", return_value=Connection( conn_id="test",
def postgres_credentials(): """Namedtuple containing postgres credentials to define only once.""" PostgresCredentials = namedtuple("PostgresCredentials", ["username", "password"]) return PostgresCredentials("testuser", "testpass") postgres_image = fetch(repository="postgres:11.1-alpine") postgres = container( image="{postgres_image.id}", environment={ "POSTGRES_USER": "******", "POSTGRES_PASSWORD": "******", }, ports={"5432/tcp": None}, volumes={ path.join(path.dirname(__file__), "postgres-init.sql"): { "bind": "/docker-entrypoint-initdb.d/postgres-init.sql" } }, ) def test_postgres_to_local_operator(test_dag, mocker, tmpdir, postgres, postgres_credentials): mocker.patch.object( PostgresHook, "get_connection", return_value=Connection( host="localhost",
# override again def cleanup(): nbexchange.stop() NbExchange.clear_instance() request.addfinalizer(cleanup) # convenience for accessing nbexchange in tests nbexchange.url = f"http://127.0.0.1:{nbexchange.port}{nbexchange.base_url}".rstrip( "/" ) return nbexchange @pytest.fixture(scope="session") def db(): """Get a db session""" _db = Session() # user = nbexchange.models.users.User( name=getuser(), org_id=1 ) # TODO: remove Magic number _db.add(user) _db.commit() return _db # Docker images nbexchange_image = build(path=".") container = container(image="{nbexchange_image.id}", ports={"9000/tcp": None})
"""Smoke tests for the vmware_exporter container image."""
from pytest_docker_tools import container, build
import requests

# Build the exporter image from the repository root.
vmware_exporter_image = build(path='.')

# Run the exporter with its metrics port published on an ephemeral host port.
vmware_exporter = container(
    image='{vmware_exporter_image.id}',
    ports={
        '9272/tcp': None,
    },
)


def test_container_starts(vmware_exporter):
    """The exporter's health endpoint answers 200 once the container is up."""
    host, port = vmware_exporter.get_addr('9272/tcp')
    response = requests.get(f'http://{host}:{port}/healthz')
    assert response.status_code == 200


def test_container_404(vmware_exporter):
    """A nonexistent route ('meetrics' — presumably a deliberate misspelling
    of 'metrics' to probe an unknown path) returns 404."""
    host, port = vmware_exporter.get_addr('9272/tcp')
    response = requests.get(f'http://{host}:{port}/meetrics')
    assert response.status_code == 404
''' This module contains tests of the 'orchestration' of dependent docker units. If a container depends on an image and a container, and that container depends on another container, and so on, then all the contains should be built in the right order. ''' from pytest_docker_tools import build, container, fetch, network, volume redis_image = fetch(repository='redis:latest') redis0 = container(image='{redis_image.id}', environment={ 'MARKER': 'redis0-0sider', }) foobar = build(path='tests/integration') mynetwork = network() myvolume = volume() mycontainer = container( image='{foobar.id}', network='{mynetwork.id}', volumes={ '{myvolume.id}': { 'bind': '/var/tmp' }, }, environment={ 'REDIS_IP': lambda redis0: redis0.ips.primary, }, dns=['{redis0.ips.primary}'],
# pylint: disable=invalid-name,no-value-for-parameter,redefined-outer-name from time import sleep from pytest_docker_tools import container, build from clai.tools.docker_utils import execute_cmd from test_integration.conftest import get_base_path my_clai_image = build( path=get_base_path(), dockerfile='./test_integration/docker/centos/Dockerfile.no.install') my_clai = container( image='{my_clai_image.id}', ) INSTALL_CORRECTLY_MESSAGE = "CLAI has been installed correctly, you will need to restart your shell." UNINSTALL_CORRECTLY_MESSAGE = "CLAI has been uninstalled correctly, you will need to restart your shell." def test_install_should_finish_correctly(my_clai): install_output = execute_cmd(my_clai, "sudo ./install.sh --unassisted --demo") assert INSTALL_CORRECTLY_MESSAGE in install_output def test_install_should_modify_correct_startup_files(my_clai): execute_cmd(my_clai, "sudo ./install.sh --unassisted --demo") files = my_clai.get_files('/root')
if "Ray runtime started" in super().logs(): return True return False def client(self): port = self.ports["8000/tcp"][0] return HTTPConnection(f"localhost:{port}") gcs_network = network(driver="bridge") redis_image = fetch(repository="redis:latest") redis = container( image="{redis_image.id}", network="{gcs_network.name}", command=("redis-server --save 60 1 --loglevel" " warning --requirepass 5241590000000000"), ) header_node = container( image="ray_ci:v1", name="gcs", network="{gcs_network.name}", command=["ray", "start", "--head", "--block", "--num-cpus", "0"], environment={"RAY_REDIS_ADDRESS": "{redis.ips.primary}:6379"}, wrapper_class=Container, ports={ "8000/tcp": None, }, )
"""Container-creation tests: a fetched redis image and a locally built ipv6 image."""
import os
import socket

from pytest_docker_tools import build, container, fetch
from pytest_docker_tools.utils import wait_for_callable

# Redis container exposed on an ephemeral TCP port.
test_container_1_image = fetch(repository='redis:latest')
test_container_1 = container(
    image='{test_container_1_image.id}',
    ports={
        '6379/tcp': None,
    },
)

# Image built from the ipv6 fixture directory, exposing a single UDP port.
ipv6_folder = os.path.join(os.path.dirname(__file__), 'fixtures/ipv6')
ipv6_image = build(path=ipv6_folder)
ipv6 = container(
    image='{ipv6_image.id}',
    ports={
        '1234/udp': None,
    },
)


def test_container_created(docker_client, test_container_1):
    """The fixture's container id must appear in the daemon's running list."""
    running_ids = (
        c.id for c in docker_client.containers.list(ignore_removed=True)
    )
    assert test_container_1.id in running_ids, 'Looks like we failed to start a container'
import ipaddress import time import requests from prometheus_client.parser import text_string_to_metric_families from pytest_docker_tools import build, container tshark_exporter_image = build(path='.') tshark_exporter = container( image='{tshark_exporter_image.id}', ports={ '9431/tcp': None, }, ) def test_metrics_server_responds_immediately(tshark_exporter): port = tshark_exporter.ports['9431/tcp'][0] response = requests.get(f'http://localhost:{port}/metrics') assert response.status_code == 200 assert b'# HELP tshark_exporter_match_bytes' in response.content assert b'# HELP tshark_exporter_match_count' in response.content def test_simple_capture(tshark_exporter): port = tshark_exporter.ports['9431/tcp'][0] while 'Capturing on \'eth0\'' not in tshark_exporter.logs(): time.sleep(0.5)
from docker.client import DockerClient from docker.errors import NotFound import pytest from pytest_docker_tools import build, container, fetch, image from pytest_docker_tools.utils import LABEL_REUSABLE, wait_for_callable logger = logging.getLogger(__name__) test_container_1_image = fetch(repository="redis:latest") test_container_1_same_image = image(name="redis:latest") test_container_1 = container( image="{test_container_1_image.id}", ports={ "6379/tcp": None, }, name="test_container", ) original_container_1 = container( image="{test_container_1_same_image.id}", ports={ "6379/tcp": None, }, name="test_container_org", ) ipv6_folder = os.path.join(os.path.dirname(__file__), "fixtures/ipv6") ipv6_image = build(path=ipv6_folder) ipv6 = container(
from tests import constants from tests.utils.container_wrappers import ( PostgresContainer, QuipucordsContainer, ScanTargetContainer, ) from tests.utils.http import BaseUrlClient, QPCAuth # pylint: disable=no-value-for-parameter postgres_container = container( environment=dict( POSTGRES_USER=constants.POSTGRES_USER, POSTGRES_PASSWORD=constants.POSTGRES_PASSWORD, POSTGRES_DB=constants.POSTGRES_DB, ), image="postgres:14.1", restart_policy={"Name": "on-failure"}, scope="class", timeout=constants.READINESS_TIMEOUT_SECONDS, wrapper_class=PostgresContainer, ) qpc_server_image = build( path=constants.PROJECT_ROOT_DIR.as_posix(), rm=constants.CLEANUP_DOCKER_LAYERS, forcerm=constants.CLEANUP_DOCKER_LAYERS, ) qpc_server_container = container( environment=dict( ANSIBLE_LOG_LEVEL=constants.QPC_ANSIBLE_LOG_LEVEL, DJANGO_LOG_LEVEL=constants.QUIPUCORDS_LOG_LEVEL,
import os import socket from pytest_docker_tools import build, container, fetch from pytest_docker_tools.utils import wait_for_callable test_container_1_image = fetch(repository="redis:latest") test_container_1 = container( image="{test_container_1_image.id}", ports={ "6379/tcp": None, }, ) ipv6_folder = os.path.join(os.path.dirname(__file__), "fixtures/ipv6") ipv6_image = build(path=ipv6_folder) ipv6 = container( image="{ipv6_image.id}", ports={ "1234/udp": None, }, ) def test_container_created(docker_client, test_container_1): for c in docker_client.containers.list(ignore_removed=True): if c.id == test_container_1.id: # Looks like we managed to start one! break else:
import os

from pytest_docker_tools import build, container


def get_base_path():
    """Return the repository root relative to the current working directory.

    When pytest is launched from inside ``test_integration`` the Docker build
    context must point one level up; otherwise the cwd is already the root.

    Returns:
        str: ``'../'`` when the cwd path contains ``test_integration``,
        otherwise ``'.'``.
    """
    # Fix: removed a leftover debug print(root_path) that polluted stdout
    # every time the module was imported.
    root_path = os.getcwd()
    if 'test_integration' in root_path:
        return '../'
    return '.'


# Image with CLAI pre-installed, built from the repository root.
my_clai_installed_image = build(
    path=get_base_path(),
    dockerfile='./test_integration/docker/centos/Dockerfile')

# One container shared by all tests in a module.
my_clai_module = container(image='{my_clai_installed_image.id}', scope='module')


def pytest_generate_tests(metafunc):
    """Parametrize (command, command_expected) pairs declared by the test class.

    Test classes expose ``get_commands_to_execute`` and
    ``get_commands_expected``; the two lists are zipped pairwise.
    """
    if "command" in metafunc.fixturenames:
        commands = getattr(metafunc.cls, 'get_commands_to_execute')(metafunc.cls)
        commands_expected = getattr(metafunc.cls, 'get_commands_expected')(metafunc.cls)
        metafunc.parametrize(["command", 'command_expected'],
                             list(zip(commands, commands_expected)))
"""Fixtures: a fake DNS server plus an API server that resolves through it."""
import os
from http.client import HTTPConnection

import pytest
from pytest_docker_tools import build, container

_HERE = os.path.dirname(__file__)

# DNS server answering example.com A queries with a canned address.
fakedns_image = build(path=os.path.join(_HERE, 'dns'), )
fakedns = container(
    image='{fakedns_image.id}',
    environment={
        'DNS_EXAMPLE_COM__A': '127.0.0.1',
    },
)

# API server wired to use the fake DNS container as its resolver.
apiserver_image = build(path=os.path.join(_HERE, 'api'), )
apiserver = container(
    image='{apiserver_image.id}',
    ports={
        '8080/tcp': None,
    },
    dns=['{fakedns.ips.primary}'],
)


@pytest.fixture
def apiclient(apiserver):
    """HTTP client bound to the API server's published host port."""
    published_port = apiserver.ports['8080/tcp'][0]
    return HTTPConnection(f'localhost:{published_port}')
from galaxy_exporter.galaxy_exporter import app

# Well-known Galaxy artifacts used as probe targets by the tests.
TEST_COLLECTION = 'community.kubernetes'
TEST_ROLE = 'mesaguy.prometheus'

# In-process client for the exporter application (no container needed).
client = TestClient(app)

# Session-scoped image build from the repository root.
galaxy_exporter_image = build(
    nocache=False,
    scope='session',
    path='.',
)

# Session-scoped container with the exporter port published on an
# ephemeral host port.
galaxy_exporter_container = container(image='{galaxy_exporter_image.id}',
                                      scope='session',
                                      ports={
                                          '9654/tcp': None,
                                      })


@pytest.fixture(scope='session')
def galaxy_exporter_client(galaxy_exporter_container):
    """HTTP connection to the containerized exporter's published port."""
    published_port = galaxy_exporter_container.ports['9654/tcp'][0]
    return HTTPConnection(f'localhost:{published_port}')


@pytest.fixture(scope='session')
def host(galaxy_exporter_container, request):
    """testinfra host handle for inspecting the running container."""
    yield testinfra.get_host("docker://" + galaxy_exporter_container.id)
"""Repeated liveness checks for the backend app and redis containers."""
from http.client import HTTPConnection

import pytest
from pytest_docker_tools import fetch, build, container

# Backend application image built from a fixed deployment checkout;
# container port 8000 is published on host port 9000.
my_test_backend_app_image = build(
    path='/root/app-deploy',
)
my_test_backend_app = container(
    image='{my_test_backend_app_image.id}',
    ports={
        '8000/tcp': '9000/tcp',
    }
)

# Plain redis container with no published ports.
my_test_redis_server_image = fetch(repository='redis:latest')
my_test_redis_server = container(
    image='{my_test_redis_server_image.id}',
)


@pytest.mark.parametrize("i", list(range(100)))
def test_redist(i, my_test_redis_server):
    """The redis container reports a running status on every iteration."""
    assert my_test_redis_server.status == "running"


@pytest.mark.parametrize("i", list(range(100)))
def test_app(i, my_test_backend_app):
    """The backend app container reports a running status on every iteration."""
    assert my_test_backend_app.status == "running"
""" This module contains tests of the 'orchestration' of dependent docker units. If a container depends on an image and a container, and that container depends on another container, and so on, then all the contains should be built in the right order. """ from pytest_docker_tools import build, container, fetch, network, volume redis_image = fetch(repository="redis:latest") redis0 = container( image="{redis_image.id}", environment={ "MARKER": "redis0-0sider", }, ) foobar = build(path="tests/integration") mynetwork = network() myvolume = volume() mycontainer = container( image="{foobar.id}", network="{mynetwork.id}", volumes={ "{myvolume.id}": { "bind": "/var/tmp" }, }, environment={ "REDIS_IP": lambda redis0: redis0.ips.primary,
mysql_image = fetch(repository="mysql:8.0.18") s3_image = fetch(repository="minio/minio:RELEASE.2019-12-30T05-45-39Z") s3_init_image = fetch(repository="minio/mc:RELEASE.2019-08-29T00-40-57Z") docker_network = network(name="testnetwork") mysql = container( image="{mysql_image.id}", environment={ "MYSQL_DATABASE": "testdb", "MYSQL_USER": "******", "MYSQL_PASSWORD": "******", "MYSQL_ROOT_PASSWORD": "******", }, ports={"3306/tcp": None}, volumes={ os.path.join(os.path.dirname(__file__), "mysql-init.sql"): { "bind": "/docker-entrypoint-initdb.d/mysql-init.sql" }, os.path.join(os.path.dirname(__file__), "testdata.csv"): { "bind": "/docker-entrypoint-initdb.d/testdata.csv" }, }, command="--secure-file-priv=/docker-entrypoint-initdb.d", ) s3 = container( image="{s3_image.id}", name="s3", ports={"9000/tcp": None}, environment={
from alerta import ( get_threshold_alert_shell, get_sequence_alert_shell, get_deadman_alert_shell, ) from alerta import remove_previously_alerted, remove_inflight_events from alerta import save_alert, save_inflight_alert from alerta import determine_threshold_trigger, determine_deadman_trigger from alerta import expire_sequence_alerts, create_sequence_alerts mongo_image = fetch(repository="mongo:latest") mongo_session = container( image="{mongo_image.id}", scope="session", ports={ "27017/tcp": 27017, }, ) print("setting up logging") logging_config_file_path = Path(__file__).parent.joinpath("logging_config.yml") with open(logging_config_file_path, "r") as fd: logging_config = yaml.safe_load(fd) logging.config.dictConfig(logging_config) global logger logger = logging.getLogger() logger.info("logging established") @pytest.fixture(scope="module") def mongo_connection(mongo_session):