def _get_warmup_conditions():
    """Build the list of readiness conditions for the MySQL environment.

    Group replication gets per-node log checks plus group init; the default
    (master/slave) topology waits on master/slave init and the slave log.
    """
    if MYSQL_REPLICATION == 'group':
        ready_message = "X Plugin ready for connections. Bind-address: '::' port: 33060"
        # One log check per cluster node, then replication setup and seeding.
        node_checks = [CheckDockerLogs(node, ready_message) for node in ('node1', 'node2', 'node3')]
        return node_checks + [init_group_replication, populate_database]
    return [
        WaitFor(init_master, wait=2),
        WaitFor(init_slave, wait=2),
        CheckDockerLogs('mysql-slave', ["ready for connections", "mariadb successfully initialized"]),
        populate_database,
    ]
def dd_environment():
    """
    Start a cluster with one master, one replica and one unhealthy replica
    and stop it after the tests are done.

    If there's any problem executing docker-compose, let the exception bubble up.
    """
    # COUCH_VERSION's first character selects the major version ('1' or '2').
    couch_version = os.environ["COUCH_VERSION"][0]
    compose_path = os.path.join(common.HERE, 'compose', 'compose_v{}.yaml'.format(couch_version))
    readiness = [
        CheckEndpoints([common.URL]),
        lambda: generate_data(couch_version),
        WaitFor(send_replication, args=(couch_version,), attempts=120),
        WaitFor(get_replication, args=(couch_version,), attempts=120),
    ]
    with docker_run(
        compose_file=compose_path,
        env_vars={'COUCH_PORT': common.PORT},
        conditions=readiness,
    ):
        if couch_version == '1':
            yield common.BASIC_CONFIG
        elif couch_version == '2':
            yield common.BASIC_CONFIG_V2
def dd_environment(config_e2e):
    """Spin up the MySQL compose stack with the container logs mounted on the host."""
    logs_path = _mysql_logs_path()
    with TempDir('logs') as logs_host_path:
        # Expose the container's log directory to the agent via a docker volume.
        e2e_metadata = {'docker_volumes': ['{}:{}'.format(logs_host_path, logs_path)]}
        environment = {
            'MYSQL_DOCKER_REPO': _mysql_docker_repo(),
            'MYSQL_PORT': str(common.PORT),
            'MYSQL_SLAVE_PORT': str(common.SLAVE_PORT),
            'MYSQL_CONF_PATH': _mysql_conf_path(),
            'MYSQL_LOGS_HOST_PATH': logs_host_path,
            'MYSQL_LOGS_PATH': logs_path,
            'WAIT_FOR_IT_SCRIPT_PATH': _wait_for_it_script(),
        }
        warmup = [
            WaitFor(init_master, wait=2),
            WaitFor(init_slave, wait=2),
            CheckDockerLogs('mysql-slave', ["ready for connections", "mariadb successfully initialized"]),
            populate_database,
        ]
        with docker_run(
            os.path.join(common.HERE, 'compose', COMPOSE_FILE),
            env_vars=environment,
            conditions=warmup,
        ):
            yield config_e2e, e2e_metadata
def dd_environment(e2e_instance):
    """Stand up memcached over TCP and, where supported, over a unix socket as well."""
    compose_path = os.path.join(HERE, 'compose', 'docker-compose.yaml')
    tcp_ready = WaitFor(connect_to_mcache, args=(['{}:{}'.format(HOST, PORT)], USERNAME, PASSWORD))
    with docker_run(
        compose_path,
        service_name='memcached',
        env_vars={'PWD': HERE},
        conditions=[tcp_ready],
    ):
        if not platform_supports_sockets:
            yield e2e_instance
        else:
            with TempDir() as temp_dir:
                host_socket_path = os.path.join(temp_dir, 'memcached.sock')
                if not os.path.exists(host_socket_path):
                    # The container user must be able to create the socket here.
                    os.chmod(temp_dir, 0o777)
                socket_ready = WaitFor(connect_to_mcache, args=(host_socket_path, USERNAME, PASSWORD))
                with docker_run(
                    compose_path,
                    service_name='memcached_socket',
                    env_vars={
                        'DOCKER_SOCKET_DIR': DOCKER_SOCKET_DIR,
                        'DOCKER_SOCKET_PATH': DOCKER_SOCKET_PATH,
                        'HOST_SOCKET_DIR': temp_dir,
                        'HOST_SOCKET_PATH': host_socket_path,
                    },
                    conditions=[socket_ready],
                    # Don't worry about spinning down since the outermost runner will already do that
                    down=lambda: None,
                ):
                    yield e2e_instance
def dd_environment(instance):
    """Run the Elasticsearch image selected by ELASTIC_IMAGE and wait for readiness."""
    image_name = os.environ.get('ELASTIC_IMAGE')
    # Unknown images fall back to the default compose file.
    compose_path = os.path.join(HERE, 'compose', COMPOSE_FILES_MAP.get(image_name, 'docker-compose.yaml'))
    readiness = [WaitFor(ping_elastic), WaitFor(create_slm, attempts=5)]
    with docker_run(compose_file=compose_path, conditions=readiness):
        yield instance
def dd_environment():
    """Start the compose stack, using the TLS variant when the TLS tox env is active."""
    if TOX_ENV == 'py38-tls':
        compose_file = os.path.join(HERE, 'docker', 'docker-compose-tls.yaml')
        setup = WaitFor(create_tls_database)
        result = (E2E_TLS_CONFIG, E2E_METADATA)
    else:
        compose_file = os.path.join(HERE, 'docker', 'docker-compose.yaml')
        setup = WaitFor(create_database)
        result = (E2E_CONFIG, E2E_METADATA)
    with docker_run(compose_file=compose_file, conditions=[setup]):
        yield result
def dd_environment():
    """Bring up the stack with the RRD directory mounted from a host temp dir."""
    with TempDir("nagios_var_log") as rrd_path:
        # Copy the metadata so the shared constant is never mutated.
        metadata = deepcopy(E2E_METADATA)
        metadata['docker_volumes'] = ['{}:{}'.format(rrd_path, RRD_PATH)]
        readiness = [WaitFor(setup_db), WaitFor(check_data_available), WaitFor(poll_cacti)]
        with docker_run(
            compose_file=os.path.join(HERE, "compose", "docker-compose.yaml"),
            env_vars={'RRD_PATH': rrd_path},
            conditions=readiness,
            build=True,
        ):
            yield INSTANCE_INTEGRATION, metadata
def dd_environment():
    """Launch the sharded cluster, initialize it, and provision the shard user."""
    compose_file = os.path.join(common.HERE, 'compose', 'docker-compose.yml')
    setup_steps = [
        WaitFor(setup_sharding, args=(compose_file,), attempts=5, wait=5),
        InitializeDB(),
        WaitFor(create_shard_user, attempts=60, wait=5),
    ]
    with docker_run(compose_file, conditions=setup_steps):
        yield common.INSTANCE_BASIC
def legacy_environment():
    """
    Start the HAProxy compose environment.

    Always brings up the "haproxy-open" service. When the platform supports
    unix sockets, additionally brings up the socket-enabled "haproxy" service
    and yields a config covering both the unix socket and the TCP socket;
    otherwise yields the open (TCP-only) config.
    """
    env = {}
    env['HAPROXY_CONFIG_DIR'] = os.path.join(HERE, 'compose')
    env['HAPROXY_CONFIG_OPEN'] = os.path.join(HERE, 'compose', 'haproxy-open.cfg')
    with docker_run(
        compose_file=os.path.join(HERE, 'compose', 'haproxy.yaml'),
        env_vars=env,
        service_name="haproxy-open",
        conditions=[WaitFor(wait_for_haproxy_open)],
    ):
        if platform_supports_sockets:
            with TempDir() as temp_dir:
                host_socket_path = os.path.join(temp_dir, 'datadog-haproxy-stats.sock')
                env['HAPROXY_CONFIG'] = os.path.join(HERE, 'compose', 'haproxy.cfg')
                # FIX: compare version components numerically. The previous string
                # comparison (['1', '10'] >= ['1', '6'] -> False) selected the wrong
                # config for double-digit minor versions such as 1.10.
                version_parts = [int(p) for p in os.environ.get('HAPROXY_VERSION', '1.5.11').split('.')[:2]]
                if version_parts >= [1, 6]:
                    env['HAPROXY_CONFIG'] = os.path.join(HERE, 'compose', 'haproxy-1_6.cfg')
                env['HAPROXY_SOCKET_DIR'] = temp_dir
                with docker_run(
                    compose_file=os.path.join(HERE, 'compose', 'haproxy.yaml'),
                    env_vars=env,
                    service_name="haproxy",
                    conditions=[WaitFor(wait_for_haproxy)],
                ):
                    try:
                        # on linux this needs access to the socket
                        # it won't work without access
                        chown_args = []
                        user = getpass.getuser()
                        if user != 'root':
                            chown_args += ['sudo']
                        chown_args += ["chown", user, host_socket_path]
                        subprocess.check_call(chown_args, env=env)
                    except subprocess.CalledProcessError:
                        # it's not always bad if this fails
                        pass
                    config = deepcopy(CHECK_CONFIG)
                    unixsocket_url = 'unix://{0}'.format(host_socket_path)
                    config['unixsocket_url'] = unixsocket_url
                    yield {'instances': [config, CONFIG_TCPSOCKET]}
        else:
            yield deepcopy(CHECK_CONFIG_OPEN)
def dd_environment(instance_basic):
    """Start the MySQL master/slave pair and wait until both accept connections."""
    environment = {
        'MYSQL_DOCKER_REPO': _mysql_docker_repo(),
        'MYSQL_PORT': str(common.PORT),
        'MYSQL_SLAVE_PORT': str(common.SLAVE_PORT),
        'WAIT_FOR_IT_SCRIPT_PATH': _wait_for_it_script(),
    }
    readiness = [WaitFor(connect_master, wait=2), WaitFor(connect_slave, wait=2)]
    with docker_run(
        os.path.join(common.HERE, 'compose', COMPOSE_FILE),
        env_vars=environment,
        conditions=readiness,
    ):
        yield instance_basic
def dd_environment(mock_dns, e2e_instance):
    """
    Start a kafka cluster and wait for it to be up and running.
    """
    # Advertising the hostname doesn't work on docker:dind so we manually
    # resolve the IP address. This seems to also work outside docker:dind
    # so we got that goin for us.
    environment = {'KAFKA_HOST': HOST_IP}
    readiness = [WaitFor(find_topics, attempts=60, wait=3), WaitFor(initialize_topics)]
    with docker_run(DOCKER_IMAGE_PATH, conditions=readiness, env_vars=environment):
        yield e2e_instance
def dd_environment():
    """Run the database compose file and yield the default check config."""
    manager = DbManager(CONFIG)
    # Initialize first, then wait until a connection can be established.
    startup = [manager.initialize, WaitFor(manager.connect)]
    with docker_run(COMPOSE_FILE, conditions=startup):
        yield CONFIG
def dd_environment(e2e_instance):
    """
    Start a standalone postgres server requiring authentication.
    """
    compose_path = os.path.join(HERE, 'compose', 'docker-compose.yaml')
    with docker_run(compose_path, conditions=[WaitFor(connect_to_pg)]):
        yield e2e_instance
def dd_environment(instance):
    """Bring up the sharded cluster and initialize the database before yielding."""
    compose_file = os.path.join(common.HERE, 'compose', 'docker-compose.yml')
    startup = [
        WaitFor(setup_sharding, args=(compose_file,), attempts=5, wait=5),
        InitializeDB(),
    ]
    with docker_run(compose_file, conditions=startup):
        yield instance
def dd_environment():
    """Start SQL Server via compose; requires pyodbc with a working ODBC driver."""
    if pyodbc is None:
        raise Exception("pyodbc is not installed!")

    def sqlserver_can_connect():
        # SA credentials match the compose file's setup.
        conn_str = 'DRIVER={};Server={};Database=master;UID=sa;PWD=Password123;'.format(
            get_local_driver(), DOCKER_SERVER
        )
        pyodbc.connect(conn_str, timeout=30)

    compose_file = os.path.join(HERE, os.environ["COMPOSE_FOLDER"], 'docker-compose.yaml')
    conditions = [WaitFor(sqlserver_can_connect, wait=3, attempts=10)]
    if os.environ["COMPOSE_FOLDER"] == 'compose-ha':
        # HA is only ready once the AG secondary has connected to the primary.
        conditions.append(
            CheckDockerLogs(
                compose_file,
                'Always On Availability Groups connection with primary database established for secondary database',
            )
        )
    with docker_run(compose_file=compose_file, conditions=conditions, mount_logs=True):
        yield FULL_E2E_CONFIG
def dd_environment():
    """
    Spin up and initialize couchbase
    """
    # Each setup step gets the same retry budget.
    steps = (couchbase_container, couchbase_init, couchbase_setup, node_stats, bucket_stats)
    readiness = [WaitFor(step, attempts=15) for step in steps]
    with docker_run(
        compose_file=os.path.join(HERE, 'compose', 'standalone.compose'),
        env_vars={'CB_CONTAINER_NAME': CB_CONTAINER_NAME},
        conditions=readiness,
    ):
        yield DEFAULT_INSTANCE
def dd_environment():
    """Start an Oracle database container and yield an instance plus client metadata."""
    instance = {
        'server': '{}:{}'.format(HOST, PORT),
        'user': USER,
        'password': PASSWORD,
        'service_name': 'InfraDB.us.oracle.com',
    }
    if CLIENT_LIB == 'jdbc':
        # The JDBC client needs the driver jar path wired into the instance.
        e2e_metadata = E2E_METADATA_JDBC_CLIENT
        instance['jdbc_driver_path'] = '/opt/oracle/instantclient_19_3/ojdbc8.jar'
    else:
        e2e_metadata = E2E_METADATA_ORACLE_CLIENT
    readiness = [
        CheckDockerLogs(COMPOSE_FILE, ['The database is ready for use'], wait=5, attempts=120),
        WaitFor(create_user),
    ]
    with docker_run(
        COMPOSE_FILE,
        conditions=readiness,
        env_vars={'ORACLE_DATABASE_VERSION': ORACLE_DATABASE_VERSION},
        attempts=20,
        attempts_wait=5,
    ):
        yield instance, e2e_metadata
def dd_environment():
    """
    Start postgres and install pgbouncer. If there's any problem executing
    docker-compose, let the exception bubble up.
    """
    port_checks = [
        WaitFor(container_up, args=("Postgres", 5432)),
        WaitFor(container_up, args=("PgBouncer", common.PORT)),
    ]
    with docker_run(
        compose_file=os.path.join(HERE, 'compose', 'docker-compose.yml'),
        env_vars={'TEST_RESOURCES_PATH': os.path.join(HERE, 'resources')},
        conditions=port_checks,
    ):
        yield common.DEFAULT_INSTANCE
def dd_environment(instance):
    """Bring up CoreDNS with the test config folder mounted and yield the instance."""
    environment = {'COREDNS_CONFIG_FOLDER': CONFIG_FOLDER}
    compose_path = os.path.join(HERE, 'docker', 'docker-compose.yml')
    with docker_run(compose_path, conditions=[WaitFor(init_coredns)], env_vars=environment):
        yield instance
def dd_environment():
    """
    Spin up and initialize Nextcloud.

    (The previous docstring said "couchbase" — a copy/paste error; every step
    here provisions a Nextcloud container.)
    """
    with docker_run(
        compose_file=os.path.join(HERE, 'compose', CONTAINER_NAME),
        # Admin credentials consumed by the Nextcloud image on first boot.
        env_vars={
            'NEXTCLOUD_ADMIN_USER': USER,
            'NEXTCLOUD_ADMIN_PASSWORD': PASSWORD,
        },
        conditions=[
            WaitFor(nextcloud_container, attempts=15),
            WaitFor(nextcloud_install, attempts=15),
            WaitFor(nextcloud_add_trusted_domain, attempts=15),
            WaitFor(nextcloud_stats, attempts=15),
        ],
    ):
        yield BASE_CONFIG
def dd_environment(instance_basic):
    """Start the MySQL master/slave pair, seed the database, then yield the instance."""
    compose_path = os.path.join(common.HERE, 'compose', COMPOSE_FILE)
    environment = {
        'MYSQL_DOCKER_REPO': _mysql_docker_repo(),
        'MYSQL_PORT': str(common.PORT),
        'MYSQL_SLAVE_PORT': str(common.SLAVE_PORT),
        'WAIT_FOR_IT_SCRIPT_PATH': _wait_for_it_script(),
    }
    warmup = [
        WaitFor(init_master, wait=2),
        WaitFor(init_slave, wait=2),
        CheckDockerLogs('mysql-slave', ["ready for connections", "mariadb successfully initialized"]),
        populate_database,
    ]
    with docker_run(compose_path, env_vars=environment, conditions=warmup):
        yield instance_basic
def dd_environment(instance_basic):
    """
    Start the MySQL master/slave pair, seed the master directly over a pymysql
    connection, and yield the basic instance.

    FIX: the pymysql connection was previously left open for the lifetime of
    the fixture; it is now closed as soon as seeding finishes.
    """
    with docker_run(
        os.path.join(common.HERE, 'compose', COMPOSE_FILE),
        env_vars={
            'MYSQL_DOCKER_REPO': _mysql_docker_repo(),
            'MYSQL_PORT': str(common.PORT),
            'MYSQL_SLAVE_PORT': str(common.SLAVE_PORT),
            'WAIT_FOR_IT_SCRIPT_PATH': _wait_for_it_script(),
        },
        conditions=[
            WaitFor(init_master, wait=2),
            WaitFor(init_slave, wait=2),
        ],
    ):
        master_conn = pymysql.connect(host=common.HOST, port=common.PORT, user='******')
        try:
            _populate_database(master_conn)
        finally:
            # Don't leak the seeding connection while tests run.
            master_conn.close()
        yield instance_basic
def dd_environment():
    """
    Start postgres and install pgbouncer. If there's any problem executing
    `docker compose`, let the exception bubble up.
    """
    # Releases before 1.10 need the legacy compose file.
    if common.get_version_from_env() < version.parse('1.10'):
        compose_file = 'docker-compose-old.yml'
    else:
        compose_file = 'docker-compose.yml'
    port_checks = [
        WaitFor(container_up, args=("Postgres", 5432)),
        WaitFor(container_up, args=("PgBouncer", common.PORT)),
    ]
    with docker_run(
        compose_file=os.path.join(HERE, 'compose', compose_file),
        env_vars={'TEST_RESOURCES_PATH': os.path.join(HERE, 'resources')},
        conditions=port_checks,
    ):
        yield common.DEFAULT_INSTANCE
def dd_environment(full_e2e_config):
    """Start SQL Server for the full e2e suite; readiness varies per compose folder."""
    if pyodbc is None:
        raise Exception("pyodbc is not installed!")

    def sqlserver_can_connect():
        conn_str = 'DRIVER={};Server={};Database=master;UID=sa;PWD=Password123;'.format(
            get_local_driver(), DOCKER_SERVER
        )
        pyodbc.connect(conn_str, timeout=DEFAULT_TIMEOUT, autocommit=True)

    def high_cardinality_env_is_ready():
        return HighCardinalityQueries(
            {
                'driver': get_local_driver(),
                'host': DOCKER_SERVER,
                'username': '******',
                'password': '******',
            }
        ).is_ready()

    folder = os.environ["COMPOSE_FOLDER"]
    compose_file = os.path.join(HERE, folder, 'docker-compose.yaml')
    conditions = [WaitFor(sqlserver_can_connect, wait=3, attempts=10)]
    completion_message = 'INFO: setup.sql completed.'
    if folder == 'compose-ha':
        completion_message = (
            'Always On Availability Groups connection with primary database established '
            'for secondary database'
        )
    if 'compose-high-cardinality' in folder:
        # This env is a highly loaded database and is expected to take a while to setup.
        # This will wait about 8 minutes before timing out.
        conditions.append(WaitFor(high_cardinality_env_is_ready, wait=5, attempts=90))
    conditions.append(CheckDockerLogs(compose_file, completion_message))
    with docker_run(
        compose_file=compose_file,
        conditions=conditions,
        mount_logs=True,
        build=True,
        attempts=2,
    ):
        yield full_e2e_config, E2E_METADATA
def dd_environment():
    """Spin up SQL Server and yield the full config once a connection succeeds."""
    if pyodbc is None:
        raise Exception("pyodbc is not installed!")

    def sqlserver():
        connection_string = 'DRIVER={};Server={},{};Database=master;UID=sa;PWD=Password123;'.format(
            lib_tds_path(), HOST, PORT
        )
        pyodbc.connect(connection_string, timeout=30)

    compose_path = os.path.join(HERE, 'compose', 'docker-compose.yaml')
    with docker_run(compose_file=compose_path, conditions=[WaitFor(sqlserver, wait=3, attempts=10)]):
        yield FULL_CONFIG
def dd_environment():
    """Start the database with the admin password injected; yield config and metadata."""
    db = DbManager(ADMIN_CONFIG)
    startup = [
        CheckDockerLogs(COMPOSE_FILE, ['Startup finished!'], wait=5, attempts=120),
        WaitFor(db.connect),
        db.initialize,
    ]
    with docker_run(
        COMPOSE_FILE,
        conditions=startup,
        env_vars={'PASSWORD': ADMIN_CONFIG['password']},
    ):
        yield CONFIG, E2E_METADATA
def dd_environment(instance_single_node_install):
    """
    Start a cluster with one master, one replica, and one unhealthy replica.
    """
    # NOTE(review): docstring wording inherited from the original — verify the
    # compose file really defines that topology.
    environment = {
        'CONSUL_CONFIG_PATH': _consul_config_path(),
        'CONSUL_PORT': common.PORT,
    }
    with docker_run(
        os.path.join(common.HERE, 'compose', 'compose.yaml'),
        conditions=[WaitFor(ping_cluster)],
        env_vars=environment,
    ):
        yield instance_single_node_install
def dd_environment():
    """Spin up SQL Server and yield the e2e config and metadata once connectable."""
    if pyodbc is None:
        raise Exception("pyodbc is not installed!")

    def sqlserver():
        dsn = 'DRIVER={};Server={};Database=master;UID=sa;PWD=Password123;'.format(
            get_local_driver(), DOCKER_SERVER
        )
        pyodbc.connect(dsn, timeout=30)

    compose_path = os.path.join(HERE, 'compose', 'docker-compose.yaml')
    with docker_run(
        compose_file=compose_path,
        conditions=[WaitFor(sqlserver, wait=3, attempts=10)],
    ):
        yield FULL_E2E_CONFIG, E2E_METADATA
def dd_environment(e2e_instance):
    """
    Start a kafka cluster and wait for it to be up and running.
    """
    # Advertising the hostname doesn't work on docker:dind so we manually
    # resolve the IP address. This seems to also work outside docker:dind
    # so we got that goin for us.
    environment = {'KAFKA_HOST': HOST_IP}
    readiness = [WaitFor(find_topics, attempts=30, wait=3), initialize_topics]
    with docker_run(
        os.path.join(HERE, 'docker', 'docker-compose.yaml'),
        conditions=readiness,
        env_vars=environment,
    ):
        yield e2e_instance, E2E_METADATA
def _common_pyodbc_connect(conn_str):
    """Open a pyodbc connection with login and statement timeouts set, sanity-check it, and return it."""
    # All connections must have the correct timeouts set: without a statement
    # timeout the integration tests can *hang* for a very long time if, for
    # example, a query is blocked on something.
    connection = pyodbc.connect(conn_str, timeout=DEFAULT_TIMEOUT, autocommit=True)
    connection.timeout = DEFAULT_TIMEOUT

    def _probe():
        with connection.cursor() as cursor:
            cursor.execute("select 1")
            cursor.fetchall()

    # Retry the probe until the server actually serves queries.
    waiter = WaitFor(_probe, wait=3, attempts=10)
    waiter()
    return connection