def dd_environment():
    """Start the IBM MQ broker via docker-compose and yield (instance, E2E metadata)."""
    version = common.MQ_VERSION
    if version == 9:
        log_pattern = "AMQ5026I: The listener 'DEV.LISTENER.TCP' has started. ProcessId"
    elif version == 8:
        log_pattern = r".*QMNAME\({}\)\s*STATUS\(Running\).*".format(common.QUEUE_MANAGER)
    else:
        raise RuntimeError('Invalid version: {}'.format(version))

    # Mount the local TLS keys directory into the container.
    e2e_meta = copy.deepcopy(common.E2E_METADATA)
    key_volume = "{}:/opt/pki/keys".format(os.path.join(common.HERE, 'keys'))
    e2e_meta.setdefault('docker_volumes', []).append(key_volume)

    conditions = [CheckDockerLogs('ibm_mq1', log_pattern)]
    if not ON_WINDOWS:
        # prepare_queue_manager is only run when not on Windows (see guard).
        conditions.append(WaitFor(prepare_queue_manager))

    with docker_run(
        compose_file=common.COMPOSE_FILE_PATH,
        build=True,
        conditions=conditions,
        sleep=10,
        attempts=2,
    ):
        yield common.INSTANCE, e2e_meta
def dd_environment():
    """Spin up the Kafka/Schema Registry/Connect stack and yield the JMX check config."""
    compose_file = os.path.join(HERE, 'compose', 'docker-compose.yaml')
    startup_checks = [
        # Kafka Broker
        CheckDockerLogs('broker', 'Created log for partition _confluent'),
        # Kafka Schema Registry
        CheckDockerLogs('schema-registry', 'Server started, listening for requests...', attempts=90),
        # Kafka Connect
        CheckDockerLogs('connect', 'Kafka Connect started', attempts=120),
        # Create connectors
        WaitFor(create_connectors),
        CheckDockerLogs('connect', 'flushing 0 outstanding messages for offset commit'),
    ]
    with docker_run(compose_file, conditions=startup_checks, attempts=2):
        yield CHECK_CONFIG, {'use_jmx': True}
def dd_environment():
    """Start neo4j via docker-compose and yield the minimal check instance."""
    compose = os.path.join(DOCKER_DIR, 'docker-compose.yaml')
    with docker_run(
        compose,
        log_patterns='Remote interface available at',
        conditions=[WaitFor(init_user)],
    ):
        yield NEO4J_MINIMAL_CONFIG
def dd_environment():
    """Bring up the MapReduce environment and yield the integration instance."""
    compose_file = os.path.join(HERE, "compose", "docker-compose.yaml")
    with docker_run(
        compose_file=compose_file,
        conditions=[WaitFor(setup_mapreduce, attempts=240, wait=5)],
        env_vars={'HOSTNAME': HOST},
    ):
        yield INSTANCE_INTEGRATION
def dd_environment():
    """Start the service container and yield the check instance once it is ready."""
    ready_checks = [
        CheckDockerLogs(COMPOSE_FILE, ['service ready: soon there will be cake!']),
        WaitFor(init_db),
    ]
    with docker_run(COMPOSE_FILE, conditions=ready_checks):
        yield INSTANCE
def dd_environment(instance):
    """Start the server via docker-compose and yield (instance, JMX metadata)."""
    compose_file = os.path.join(get_here(), 'docker', 'docker-compose.yaml')
    ready = [
        WaitFor(make_query),
        CheckDockerLogs(compose_file, 'SERVER STARTED'),
    ]
    with docker_run(compose_file, conditions=ready):
        yield instance, {'use_jmx': True}
def dd_environment():
    """Bring up the MapReduce stack and yield the instance with custom host mappings."""
    compose_file = os.path.join(HERE, "compose", "docker-compose.yaml")
    with docker_run(
        compose_file=compose_file,
        conditions=[WaitFor(setup_mapreduce, attempts=5, wait=5)],
        env_vars={'HOSTNAME': HOST},
    ):
        # 'custom_hosts' in metadata provides native /etc/hosts mappings in the agent's docker container
        yield INSTANCE_INTEGRATION, {'custom_hosts': get_custom_hosts()}
def dd_environment():
    """Start neo4j (version pinned via NEO4J_VERSION) and yield the minimal config."""
    with docker_run(
        os.path.join(DOCKER_DIR, 'docker-compose.yaml'),
        # NEO4J_VERSION must be present in the environment; a KeyError here is intentional.
        env_vars={'NEO4J_VERSION': os.environ['NEO4J_VERSION']},
        log_patterns=['Remote interface available at'],
        conditions=[WaitFor(init_user)],
    ):
        yield NEO4J_MINIMAL_CONFIG
def dd_environment():
    """Start the service and yield the instance after endpoint, log, and DB checks pass."""
    startup_conditions = [
        CheckEndpoints(['http://{}:{}'.format(HOST, PORT)]),
        CheckDockerLogs(COMPOSE_FILE, ['service ready: soon there will be cake!']),
        WaitFor(init_db),
    ]
    with docker_run(COMPOSE_FILE, conditions=startup_conditions):
        yield INSTANCE
def dd_environment():
    """Start the neo4j enterprise image and yield the check instance."""
    # NEO4J_IMAGE overrides the default; note the default is computed eagerly,
    # so NEO4J_VERSION must be set even when NEO4J_IMAGE is provided.
    default_image = f"neo4j:{os.environ['NEO4J_VERSION']}-enterprise"
    image = os.environ.get("NEO4J_IMAGE", default_image)
    with docker_run(
        os.path.join(DOCKER_DIR, 'docker-compose.yaml'),
        env_vars={'NEO4J_IMAGE': image},
        log_patterns=['Remote interface available at'],
        conditions=[WaitFor(ensure_prometheus_endpoint_is_accessable)],
    ):
        yield INSTANCE
def dd_environment(instance):
    """Launch the server container and yield (instance, JMX metadata)."""
    compose_file = os.path.join(get_here(), "docker", "docker-compose.yaml")
    startup = [
        WaitFor(make_query),
        CheckDockerLogs(compose_file, "SERVER STARTED"),
    ]
    with docker_run(compose_file, conditions=startup):
        yield instance, {"use_jmx": True}
def dd_environment():
    """
    Start a cluster with one master, one replica and one unhealthy replica,
    and stop it after the tests are done.

    If there's any problem executing `docker compose`, let the exception bubble up.
    """
    # Only the major version digit drives the setup (and is forwarded to generate_data).
    couch_version = os.environ["COUCH_VERSION"][0]

    if couch_version == "1":
        compose_name = 'compose_v1.yaml'
        env_vars = {'COUCH_PORT': common.PORT}
        conditions = [
            CheckEndpoints([common.URL]),
            CheckDockerLogs('couchdb-1', ['CouchDB has started', 'Application couch_index started']),
            WaitFor(generate_data, args=(couch_version,)),
        ]
        config = common.BASIC_CONFIG
    else:
        compose_name = 'compose_v2.yaml'
        env_vars = {'COUCH_PORT': common.PORT, 'COUCH_USER': common.USER, 'COUCH_PASSWORD': common.PASSWORD}
        conditions = [
            CheckEndpoints([common.URL]),
            CheckDockerLogs('couchdb-1', ['Started replicator db changes listener']),
            WaitFor(enable_cluster),
            WaitFor(generate_data, args=(couch_version,)),
            WaitFor(check_node_stats),
            WaitFor(send_replication),
            WaitFor(get_replication),
        ]
        config = common.BASIC_CONFIG_V2

    with docker_run(
        compose_file=os.path.join(common.HERE, 'compose', compose_name),
        env_vars=env_vars,
        conditions=conditions,
    ):
        yield config
def dd_environment():
    """Build and start the Spark standalone environment, then yield the instance."""
    api_endpoints = [
        'http://{}:4040/api/v1/applications'.format(HOST),
        'http://{}:4050/api/v1/applications'.format(HOST),
        'http://{}:4050/metrics/json'.format(HOST),
    ]
    with docker_run(
        compose_file=os.path.join(HERE, 'docker', 'docker-compose.yaml'),
        build=True,
        conditions=[
            CheckEndpoints(api_endpoints),
            WaitFor(check_metrics_available, wait=5),
        ],
    ):
        yield INSTANCE_STANDALONE, {'custom_hosts': get_custom_hosts()}
def dd_environment():
    """Start Ignite with its log dir mounted to a temp dir; yield JMX config + metadata."""
    with TempDir('log') as log_dir:
        compose = os.path.join(get_here(), 'compose', 'docker-compose.yml')
        with docker_run(
            compose,
            env_vars={'LOG_DIR': log_dir},
            conditions=[WaitFor(setup_ignite)],
            log_patterns="Ignite node started OK",
        ):
            config = load_jmx_config()
            config['instances'][0]['port'] = 49112
            config['instances'][0]['host'] = get_docker_hostname()
            # Expose the container log directory to the agent via a volume.
            metadata = E2E_METADATA.copy()
            metadata['docker_volumes'] = ['{}:/var/log/ignite'.format(log_dir)]
            yield config, metadata
def dd_environment():
    """Start ProxySQL + MySQL; yield the all-metrics instance (TLS enabled on 2.x)."""
    compose_file = os.path.join(get_here(), 'compose', 'docker-compose.yml')
    with TempDir('proxysql-data') as tmp_dir:
        env_vars = {
            'PROXY_ADMIN_PORT': str(PROXY_ADMIN_PORT),
            'PROXY_PORT': str(PROXY_PORT),
            'MYSQL_PORT': str(MYSQL_PORT),
            'MYSQL_DATABASE': MYSQL_DATABASE,
            'MYSQL_USER': MYSQL_USER,
            'MYSQL_PASS': MYSQL_PASS,
            'TMP_DATA_DIR': tmp_dir,
        }
        conditions = [
            CheckDockerLogs('db', ["mysqld: ready for connections"], wait=5),
            CheckDockerLogs('proxysql', ["read_only_action RO=0 phase 3"], wait=5),
            WaitFor(init_mysql, wait=2),
            WaitFor(init_proxy, wait=2),
        ]
        with docker_run(compose_file, env_vars=env_vars, conditions=conditions):
            instance = deepcopy(INSTANCE_ALL_METRICS)
            cert_src = os.path.join(tmp_dir, 'proxysql-ca.pem')
            cert_dest = "/etc/ssl/certs/proxysql-ca.pem"
            if PROXYSQL_VERSION.startswith('2'):
                # SSL is only available with version 2.x of ProxySQL
                instance['tls_verify'] = True
                instance['tls_ca_cert'] = cert_dest
                instance['validate_hostname'] = False
            yield instance, {'docker_volumes': ['{}:{}'.format(cert_src, cert_dest)]}
def dd_environment():
    """Build and run Apache with the test config; yield the status-page config."""
    env_vars = {
        'APACHE_CONFIG': os.path.join(HERE, 'compose', 'httpd.conf'),
        'APACHE_DOCKERFILE': os.path.join(HERE, 'compose', 'Dockerfile'),
    }
    conditions = [
        CheckEndpoints([STATUS_URL]),
        # Bare callable condition: invoked directly by docker_run's condition loop.
        generate_metrics,
        WaitFor(check_status_page_ready),
    ]
    with docker_run(
        compose_file=os.path.join(HERE, 'compose', 'apache.yaml'),
        env_vars=env_vars,
        conditions=conditions,
        mount_logs=True,
        sleep=20,
    ):
        yield STATUS_CONFIG
def uds_path():
    """Expose a Unix domain socket from the container and yield its host path."""
    if Platform.is_mac():
        # See: https://github.com/docker/for-mac/issues/483
        pytest.skip('Sharing Unix sockets is not supported by Docker for Mac.')

    with TempDir() as tmp_dir:
        socket_name = 'tmp.sock'
        socket_path = os.path.join(tmp_dir, socket_name)
        with docker_run(
            compose_file=os.path.join(HERE, 'compose', 'uds.yaml'),
            env_vars={
                "UDS_HOST_DIRECTORY": tmp_dir,
                'UDS_FILENAME': socket_name,
            },
            # Ready once the container has created the socket file on the shared mount.
            conditions=[WaitFor(lambda: os.path.exists(socket_path))],
            attempts=2,
        ):
            yield socket_path
def dd_environment():
    """Build the Hazelcast cluster + management center and yield the JMX config."""
    compose_file = os.path.join(common.HERE, 'docker', 'docker-compose.yaml')
    log_conditions = [
        CheckDockerLogs('hazelcast_management_center', ['Hazelcast Management Center successfully started']),
        CheckDockerLogs('hazelcast_management_center', ['Started communication with member']),
        CheckDockerLogs('hazelcast2', [r'Hazelcast JMX agent enabled']),
        CheckDockerLogs('hazelcast2', [r'is STARTED']),
        WaitFor(trigger_some_tcp_data),
    ]
    with docker_run(
        compose_file,
        build=True,
        mount_logs=True,
        conditions=log_conditions,
        attempts=5,
        attempts_wait=5,
    ):
        config = load_jmx_config()
        config['instances'] = common.INSTANCE_MEMBERS + [common.INSTANCE_MC_JMX, common.INSTANCE_MC_PYTHON]
        yield config, {'use_jmx': True}
def dd_environment():
    """Start Ignite with JMX enabled (activation mechanism depends on the version)."""
    with TempDir('log') as log_dir:
        docker_volumes = ['{}:/var/log/ignite'.format(log_dir)]
        conditions = []
        jvm_opts = ''

        if common.IS_PRE_2_9:
            # Activate JMX through 'control.sh' and functions made available to 'ignite.sh'.
            functions_sh = os.path.join(common.HERE, 'compose', 'functions.sh')
            docker_volumes.append('{}:/opt/ignite/apache-ignite/bin/include/functions.sh'.format(functions_sh))
            conditions.append(WaitFor(control_sh_activate))
        else:
            # On 2.9.0 and above, the Ignite Docker image calls the JVM directly,
            # so JMX configuration should be set via JVM options.
            # See: https://ignite.apache.org/docs/latest/installation/installing-using-docker
            jvm_opts = ' '.join([
                '-Dcom.sun.management.jmxremote',
                '-Dcom.sun.management.jmxremote.port=49112',
                '-Dcom.sun.management.jmxremote.rmi.port=49112',
                '-Dcom.sun.management.jmxremote.authenticate=false',
                '-Dcom.sun.management.jmxremote.ssl=false',
            ])

        env_vars = {
            'IGNITE_IMAGE': common.IGNITE_IMAGE,
            'JVM_OPTS': jvm_opts,
            'LOG_DIR': log_dir,
        }

        with docker_run(
            os.path.join(get_here(), 'compose', 'docker-compose.yml'),
            env_vars=env_vars,
            conditions=conditions,
            log_patterns="Ignite node started OK",
            attempts=2,
        ):
            config = load_jmx_config()
            config['instances'][0]['port'] = 49112
            config['instances'][0]['host'] = get_docker_hostname()
            metadata = E2E_METADATA.copy()
            metadata['docker_volumes'] = docker_volumes
            yield config, metadata
def dd_environment():
    """Start the Kafka stack and its connectors; yield the JMX check configuration."""
    compose_file = os.path.join(HERE, 'compose', 'docker-compose.yaml')
    startup_checks = [
        # Kafka Broker
        CheckDockerLogs('broker', 'Monitored service is now ready'),
        # Kafka Schema Registry
        CheckDockerLogs('schema-registry', 'Server started, listening for requests...', attempts=90),
        # Kafka Connect
        CheckDockerLogs('connect', 'Kafka Connect started', attempts=120),
        # Create connectors
        WaitFor(create_connectors),
    ]
    with docker_run(compose_file, conditions=startup_checks):
        yield CHECK_CONFIG, {'use_jmx': True}
def dd_environment():
    """Build the Storm topology jar, then bring up nimbus, the UI, and the topology."""
    compose_file = os.path.join(get_here(), 'compose', 'docker-compose.yaml')

    # Build the topology jar to use in the environment
    with docker_run(compose_file, build=True, service_name='topology-maker', sleep=15):
        run_command(['docker', 'cp', 'topology-build:/topology.jar', os.path.join(get_here(), 'compose')])

    # Start services in dependency order: nimbus -> UI -> topology submitter.
    with docker_run(compose_file, service_name='storm-nimbus', conditions=[WaitFor(wait_for_thrift)]), \
            docker_run(compose_file, service_name='storm-ui', log_patterns=[r'org.apache.storm.ui.core']), \
            docker_run(compose_file, service_name='topology', log_patterns=['Finished submitting topology: topology']):
        yield INSTANCE
def test_error_fail(self):
    """WaitFor must raise RetryError when the condition always raises."""
    def always_raises():
        raise Exception

    with pytest.raises(RetryError):
        WaitFor(always_raises, attempts=1)()
def test_no_error_non_true_result_fail(self):
    """WaitFor must raise RetryError when the condition returns a non-True value."""
    condition = WaitFor(lambda: False, attempts=1)
    with pytest.raises(RetryError):
        condition()
def test_no_error_true_result_success(self):
    """WaitFor should return True when the condition succeeds immediately."""
    result = WaitFor(lambda: True)()
    assert result is True
def dd_environment(e2e_instance):
    """
    Start the Azure IoT Edge E2E environment (optionally with TLS) and yield
    the E2E instance and metadata.

    Raises:
        RuntimeError: if IOT_EDGE_CONNSTR is unset, or (in TLS mode) if a
            required certificate file is missing on disk.
    """
    if not common.E2E_IOT_EDGE_CONNSTR:
        raise RuntimeError("IOT_EDGE_CONNSTR must be set to start or stop the E2E environment.")

    if common.E2E_IOT_EDGE_TLS_ENABLED:
        compose_filename = 'docker-compose-tls.yaml'
    else:
        compose_filename = 'docker-compose.yaml'

    compose_file = os.path.join(common.HERE, 'compose', compose_filename)

    conditions = [
        # NOTE(review): '[mgmt]' in this raw pattern is a regex character class
        # (matches one of 'm', 'g', 't'), not the literal text '[mgmt]' —
        # confirm whether CheckDockerLogs treats string patterns as regexes.
        CheckDockerLogs(compose_file, r'[mgmt] .* 200 OK', wait=5),  # Verify Security Manager boots.
        CheckDockerLogs(compose_file, 'Successfully started module edgeAgent', wait=5),
        CheckDockerLogs(compose_file, 'Successfully started module edgeHub', wait=5),
        CheckDockerLogs(compose_file, 'Successfully started module SimulatedTemperatureSensor', wait=5),
        WaitFor(e2e_utils.edge_hub_endpoint_ready),
        WaitFor(e2e_utils.edge_agent_endpoint_ready),
    ]

    env_vars = {
        "E2E_LIBIOTHSM_STD_URL": common.E2E_LIBIOTHSM_STD_URL,
        "E2E_IOTEDGE_URL": common.E2E_IOTEDGE_URL,
        "E2E_IMAGE": common.E2E_IMAGE,
        "E2E_IOT_EDGE_CONNSTR": common.E2E_IOT_EDGE_CONNSTR,
    }

    if common.E2E_IOT_EDGE_TLS_ENABLED:
        # Fix: validate each of the three distinct certificate paths. The
        # original loop checked E2E_IOT_EDGE_DEVICE_CA_CERT twice and never
        # E2E_IOT_EDGE_ROOT_CA_CERT, even though all three paths are passed
        # to the containers via env_vars below.
        for path in (
            common.E2E_IOT_EDGE_ROOT_CA_CERT,
            common.E2E_IOT_EDGE_DEVICE_CA_CERT,
            common.E2E_IOT_EDGE_DEVICE_CA_PK,
        ):
            if not os.path.exists(path):
                message = (
                    "Path {!r} does not exist. "
                    "Please follow instructions in azure_iot_edge/tests/tls/README.md to "
                    "configure test TLS certificates.").format(path)
                raise RuntimeError(message)

        env_vars.update({
            "E2E_IOT_EDGE_ROOT_CA_CERT": common.E2E_IOT_EDGE_ROOT_CA_CERT,
            "E2E_IOT_EDGE_DEVICE_CA_CERT": common.E2E_IOT_EDGE_DEVICE_CA_CERT,
            "E2E_IOT_EDGE_DEVICE_CA_PK": common.E2E_IOT_EDGE_DEVICE_CA_PK,
        })

    # Custom up/down hooks: the IoT Edge daemon spawns extra containers that
    # a plain docker-compose down would not stop.
    up = e2e_utils.IoTEdgeUp(compose_file, network_name=common.E2E_NETWORK)
    down = e2e_utils.IoTEdgeDown(compose_file, stop_extra_containers=common.E2E_EXTRA_SPAWNED_CONTAINERS)

    with docker_run(conditions=conditions, env_vars=env_vars, up=up, down=down):
        yield e2e_instance, common.E2E_METADATA