# Integration-test fixtures: a pinned MySQL container plus a MinIO
# (S3-compatible) server on a shared docker network, used to exercise
# DbToFsOperator end to end.
import csv
import json
import os

import pytest
from airflow.models import Connection
from airflow_db.hooks.mysql import MysqlHook
from airflow_db.operators.db_to_fs import DbToFsOperator
from airflow_fs.hooks import S3Hook
from pytest_docker_tools import container, fetch, network
from s3fs import S3FileSystem

# Images are pinned to exact tags so test runs are reproducible.
mysql_image = fetch(repository="mysql:8.0.18")
s3_image = fetch(repository="minio/minio:RELEASE.2019-12-30T05-45-39Z")
s3_init_image = fetch(repository="minio/mc:RELEASE.2019-08-29T00-40-57Z")

# Shared network, presumably so the init container can reach MinIO by name.
# NOTE(review): assumption from the fixture names — confirm in the full file.
docker_network = network(name="testnetwork")

mysql = container(
    image="{mysql_image.id}",
    environment={
        # NOTE(review): credential values look redacted ("******") — restore
        # real throw-away test credentials before running.
        "MYSQL_DATABASE": "testdb",
        "MYSQL_USER": "******",
        "MYSQL_PASSWORD": "******",
        "MYSQL_ROOT_PASSWORD": "******",
    },
    # None asks docker to publish 3306 on an arbitrary free host port.
    ports={"3306/tcp": None},
    volumes={
        # Seed schema/data through the image's init-script directory.
        os.path.join(os.path.dirname(__file__), "mysql-init.sql"): {
            "bind": "/docker-entrypoint-initdb.d/mysql-init.sql"
        },
        os.path.join(os.path.dirname(__file__), "testdata.csv"): {
            # NOTE(review): chunk is truncated here — the bind target and the
            # remainder of this container() definition are not visible.
import pytest
from pytest_docker_tools import fetch, build, container

#myhost = "192.168.1.104"

# Backend app image is built from a local checkout; the running container
# publishes its port 8000 on host port 9000.
my_test_backend_app_image = build(
    path='/root/app-deploy',
)
my_test_backend_app = container(
    image='{my_test_backend_app_image.id}',
    ports={
        '8000/tcp': '9000/tcp',
    }
)

my_test_redis_server_image = fetch(repository='redis:latest')
my_test_redis_server = container(
    image='{my_test_redis_server_image.id}',
)


# Each check is repeated 100 times, presumably to stress repeated fixture
# setup/teardown — TODO confirm intent.
@pytest.mark.parametrize("i", list(range(100)))
def test_redist(i, my_test_redis_server):
    # The redis container fixture must already be up when the test body runs.
    assert my_test_redis_server.status == "running"


@pytest.mark.parametrize("i", list(range(100)))
def test_app(i, my_test_backend_app):
    assert my_test_backend_app.status == "running"


# NOTE(review): chunk truncated — the fixture this decorator applies to is
# not visible in this view.
@pytest.fixture
'''
This module contains tests of the 'orchestration' of dependent docker units.

If a container depends on an image and a container, and that container
depends on another container, and so on, then all the containers should be
built in the right order.
'''

from pytest_docker_tools import build, container, fetch, network, volume

redis_image = fetch(repository='redis:latest')

# Marker env var, presumably so tests can identify this specific container
# instance — confirm how MARKER is consumed elsewhere in the suite.
redis0 = container(image='{redis_image.id}', environment={
    'MARKER': 'redis0-0sider',
})

foobar = build(path='tests/integration')

mynetwork = network()
myvolume = volume()

# Depends transitively on an image, a network, a volume and the redis0
# container — exactly the ordering scenario this module is testing.
mycontainer = container(
    image='{foobar.id}',
    network='{mynetwork.id}',
    volumes={
        '{myvolume.id}': {
            'bind': '/var/tmp'
        },
    },
    environment={
        # Callable value: resolved lazily, once redis0 is running and has an
        # IP to report.
        'REDIS_IP': lambda redis0: redis0.ips.primary,
    },
    dns=['{redis0.ips.primary}'],
    # NOTE(review): chunk truncated — the remainder of this container() call
    # is not visible in this view.
# Fixtures for PostgresToLocalOperator tests: credentials plus a disposable
# Postgres container seeded by an init SQL script.
#
# Fix: the chunk used `pytest`, `namedtuple` and `path` without importing
# them — the three imports below make the module self-contained.
# NOTE(review): if these are already imported earlier in the real file, the
# duplicates are harmless.
import pytest
from collections import namedtuple
from os import path

from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import Connection
from pytest_docker_tools import container, fetch

from testing_examples.operators.postgres_to_local_operator import PostgresToLocalOperator


@pytest.fixture(scope="module")
def postgres_credentials():
    """Namedtuple containing postgres credentials to define only once."""
    PostgresCredentials = namedtuple("PostgresCredentials", ["username", "password"])
    return PostgresCredentials("testuser", "testpass")


# Pinned image for reproducible runs.
postgres_image = fetch(repository="postgres:11.1-alpine")
postgres = container(
    image="{postgres_image.id}",
    environment={
        # NOTE(review): values look redacted ("******") — restore real
        # throw-away test credentials before running.
        "POSTGRES_USER": "******",
        "POSTGRES_PASSWORD": "******",
    },
    # None asks docker to publish 5432 on an arbitrary free host port.
    ports={"5432/tcp": None},
    volumes={
        # Seed schema/data through the image's init-script directory.
        path.join(path.dirname(__file__), "postgres-init.sql"): {
            "bind": "/docker-entrypoint-initdb.d/postgres-init.sql"
        }
    },
)
# Integration-test fixtures: a pinned Postgres container plus a MinIO
# (S3-compatible) server on a shared docker network, used to exercise
# DbToFsOperator end to end.
import csv
import json
import os

from airflow.models import Connection
from airflow_db.hooks.postgres import PostgresHook
from airflow_db.operators.db_to_fs import DbToFsOperator
from airflow_fs.hooks import S3Hook, LocalHook
from pytest_docker_tools import container, fetch, network
from s3fs import S3FileSystem

# Images are pinned to exact tags so test runs are reproducible.
postgres_image = fetch(repository="postgres:12.1-alpine")
s3_image = fetch(repository="minio/minio:RELEASE.2019-12-30T05-45-39Z")
s3_init_image = fetch(repository="minio/mc:RELEASE.2019-08-29T00-40-57Z")

# Shared network, presumably so the init container can reach MinIO by name.
# NOTE(review): assumption from the fixture names — confirm in the full file.
docker_network = network(name="testnetwork")

postgres = container(
    image="{postgres_image.id}",
    environment={
        # NOTE(review): values look redacted ("******") — restore real
        # throw-away test credentials before running.
        "POSTGRES_USER": "******",
        "POSTGRES_PASSWORD": "******"
    },
    # None asks docker to publish 5432 on an arbitrary free host port.
    ports={"5432/tcp": None},
    volumes={
        # Seed schema and test data through the image's init-script directory.
        os.path.join(os.path.dirname(__file__), "postgres-init.sql"): {
            "bind": "/docker-entrypoint-initdb.d/postgres-init.sql"
        },
        os.path.join(os.path.dirname(__file__), "testdata.csv"): {
            "bind": "/docker-entrypoint-initdb.d/testdata.csv"
        },
    },
    # NOTE(review): chunk truncated — the remainder of this container() call
    # is not visible in this view.
"""
This module contains tests of the 'orchestration' of dependent docker units.

If a container depends on an image and a container, and that container
depends on another container, and so on, then all the containers should be
built in the right order.
"""

from pytest_docker_tools import build, container, fetch, network, volume

redis_image = fetch(repository="redis:latest")

# Marker env var, presumably so tests can identify this specific container
# instance — confirm how MARKER is consumed elsewhere in the suite.
redis0 = container(
    image="{redis_image.id}",
    environment={
        "MARKER": "redis0-0sider",
    },
)

foobar = build(path="tests/integration")

mynetwork = network()
myvolume = volume()

# Depends transitively on an image, a network, a volume and the redis0
# container — exactly the ordering scenario this module is testing.
mycontainer = container(
    image="{foobar.id}",
    network="{mynetwork.id}",
    volumes={
        "{myvolume.id}": {
            "bind": "/var/tmp"
        },
    },
    environment={
        # Callable value: resolved lazily, once redis0 is running and has an
        # IP to report.
        "REDIS_IP": lambda redis0: redis0.ips.primary,
        # NOTE(review): chunk truncated — the remainder of this container()
        # call is not visible in this view.
import os
import socket

from pytest_docker_tools import build, container, fetch
from pytest_docker_tools.utils import wait_for_callable

# A stock redis container with port 6379 published on an arbitrary host port.
test_container_1_image = fetch(repository='redis:latest')
test_container_1 = container(
    image='{test_container_1_image.id}',
    ports={
        '6379/tcp': None,
    },
)

# Image built from the bundled IPv6 fixture directory.
ipv6_folder = os.path.join(os.path.dirname(__file__), 'fixtures/ipv6')
ipv6_image = build(path=ipv6_folder)
ipv6 = container(
    image='{ipv6_image.id}',
    ports={
        '1234/udp': None,
    },
)


def test_container_created(docker_client, test_container_1):
    """The fixture container must show up in the daemon's running list."""
    live_ids = {c.id for c in docker_client.containers.list(ignore_removed=True)}
    assert test_container_1.id in live_ids, 'Looks like we failed to start a container'
# Session fixtures and logging bootstrap for the alerta test suite.
#
# Fixes: the chunk used `logging`, `logging.config` and `yaml` without
# importing them, and declared `global logger` at module level (a no-op:
# module-scope assignments are already global).
# NOTE(review): if these imports already exist earlier in the real file, the
# duplicates are harmless.
import logging
import logging.config
from pathlib import Path

import yaml
from pytest_docker_tools import build, container, fetch

from utils.dates import utcnow
from utils.helpers import first_matching_index_value
from alerta import generate_meteor_id
from alerta import (
    get_threshold_alert_shell,
    get_sequence_alert_shell,
    get_deadman_alert_shell,
)
from alerta import remove_previously_alerted, remove_inflight_events
from alerta import save_alert, save_inflight_alert
from alerta import determine_threshold_trigger, determine_deadman_trigger
from alerta import expire_sequence_alerts, create_sequence_alerts

mongo_image = fetch(repository="mongo:latest")
# One mongo container shared across the whole test session; host port 27017
# is bound to the container's 27017.
mongo_session = container(
    image="{mongo_image.id}",
    scope="session",
    ports={
        "27017/tcp": 27017,
    },
)

# Configure logging for the whole suite from a YAML dictConfig file kept
# next to this module.
print("setting up logging")
logging_config_file_path = Path(__file__).parent.joinpath("logging_config.yml")
with open(logging_config_file_path, "r") as fd:
    logging_config = yaml.safe_load(fd)
logging.config.dictConfig(logging_config)

# Root logger, for tests that do not create their own.
logger = logging.getLogger()