def dd_environment(e2e_instance):
    with TempDir('vault-jwt') as jwt_dir, TempDir('vault-sink') as sink_dir:
        token_file = os.path.join(sink_dir, 'token')

        if not os.path.exists(token_file):
            os.chmod(sink_dir, 0o777)
            create_file(token_file)
            os.chmod(token_file, 0o777)

        with docker_run(
                COMPOSE_FILE,
                env_vars={
                    'JWT_DIR': jwt_dir,
                    'SINK_DIR': sink_dir
                },
                conditions=[
                    WaitAndUnsealVault(HEALTH_ENDPOINT),
                    ApplyPermissions(token_file)
                ],
                sleep=10,
                mount_logs=True,
        ):
            set_client_token_path(token_file)

            yield e2e_instance, {
                'docker_volumes': ['{}:/home/vault-sink'.format(sink_dir)]
            }
Example #2
def download_cert(name, host, raw=False):
    host = urlparse(host).hostname or host
    context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS)
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE

    for _ in range(20):
        try:
            with closing(socket.create_connection((host, 443))) as sock:
                with closing(context.wrap_socket(
                        sock, server_hostname=host)) as secure_sock:
                    cert = secure_sock.getpeercert(binary_form=True)
        except Exception:  # no cov
            time.sleep(3)
        else:
            break
    else:  # no cov
        raise Exception('Unable to connect to {}'.format(host))

    with TempDir() as d:
        path = os.path.join(d, name)

        if raw:
            with open(path, 'wb') as f:
                f.write(cert)
        else:
            cert = ssl.DER_cert_to_PEM_cert(cert)
            with open(path, 'w') as f:
                f.write(cert)

        yield path
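Since download_cert is a generator, the file it yields only exists while execution is paused at the yield (TempDir removes the directory on exit). A minimal sketch of driving it directly, outside pytest, via contextlib (the filename and URL here are placeholders):

from contextlib import contextmanager

with contextmanager(download_cert)('example.pem', 'https://example.com') as path:
    with open(path) as f:
        pem_data = f.read()  # only readable inside the with block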
Example #3
def dd_environment():
    with TempDir() as nats_dir:
        env_vars = {'TEMP_DIR': nats_dir}
        with docker_run(os.path.join(DOCKER_DIR, 'docker-compose.yml'),
                        env_vars=env_vars,
                        log_patterns='test.channel3'):
            yield
Example #4
def dd_environment():
    with TempDir() as temp_dir:
        # No tear down necessary as `TempDir` will do the clean up
        with environment_run(CreateQueues(temp_dir), lambda: None) as result:
            set_env_vars({k: serialize_data(v) for k, v in result.items()})

            yield get_e2e_instance(), get_e2e_metadata()
Example #5
def dd_environment():
    # use os.path.realpath to avoid mounting issues of symlinked /var -> /private/var in Docker on macOS
    with TempDir() as tmp_dir:
        activemq_data_dir = os.path.join(tmp_dir, "data")
        fixture_archive = os.path.join(HERE, "fixtures",
                                       "apache-activemq-kahadb.tar.gz")
        os.mkdir(activemq_data_dir)
        with tarfile.open(fixture_archive, "r:gz") as f:
            f.extractall(path=activemq_data_dir)
        mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
        kahadb_dir = os.path.join(activemq_data_dir, "kahadb")
        os.chmod(kahadb_dir, mode)
        for db_file in ("db-1.log", "db.data", "db.redo"):
            os.chmod(os.path.join(kahadb_dir, db_file), mode)

        with docker_run(
                compose_file=os.path.join(HERE, 'compose',
                                          'docker-compose.yaml'),
                env_vars={"ACTIVEMQ_DATA_DIR": activemq_data_dir},
                endpoints=URL,
        ):
            yield CONFIG
Example #6
def dd_environment(e2e_instance):
    with docker_run(
        os.path.join(HERE, 'compose', 'docker-compose.yaml'),
        service_name='memcached',
        env_vars={'PWD': HERE},
        conditions=[WaitFor(connect_to_mcache, args=(['{}:{}'.format(HOST, PORT)], USERNAME, PASSWORD))],
    ):
        if platform_supports_sockets:
            with TempDir() as temp_dir:
                host_socket_path = os.path.join(temp_dir, 'memcached.sock')

                if not os.path.exists(host_socket_path):
                    os.chmod(temp_dir, 0o777)

                with docker_run(
                    os.path.join(HERE, 'compose', 'docker-compose.yaml'),
                    service_name='memcached_socket',
                    env_vars={
                        'DOCKER_SOCKET_DIR': DOCKER_SOCKET_DIR,
                        'DOCKER_SOCKET_PATH': DOCKER_SOCKET_PATH,
                        'HOST_SOCKET_DIR': temp_dir,
                        'HOST_SOCKET_PATH': host_socket_path,
                    },
                    conditions=[WaitFor(connect_to_mcache, args=(host_socket_path, USERNAME, PASSWORD))],
                    # Don't worry about spinning down since the outermost runner will already do that
                    down=lambda: None,
                ):
                    yield e2e_instance
        else:
            yield e2e_instance
Example #7
def dd_environment():
    new_e2e_metadata = deepcopy(E2E_METADATA)
    with TempDir('snmp') as tmp_dir:
        data_dir = os.path.join(tmp_dir, 'data')
        env = {'DATA_DIR': data_dir}
        if not os.path.exists(data_dir):
            shutil.copytree(os.path.join(COMPOSE_DIR, 'data'), data_dir)
            for data_file in FILES:
                response = requests.get(data_file)
                with open(os.path.join(data_dir, data_file.rsplit('/', 1)[1]), 'wb') as output:
                    output.write(response.content)

        with docker_run(os.path.join(COMPOSE_DIR, 'docker-compose.yaml'), env_vars=env, log_patterns="Listening at"):
            if SNMP_LISTENER_ENV == 'true':
                instance_config = {}
                new_e2e_metadata['docker_volumes'] = [
                    '{}:/etc/datadog-agent/datadog.yaml'.format(create_datadog_conf_file(tmp_dir))
                ]
            else:
                instance_config = generate_container_instance_config([])
                instance_config['init_config'].update(
                    {
                        'loader': 'core',
                        'use_device_id_as_hostname': True,
                        # use hostname as namespace to create different device for each user
                        'namespace': socket.gethostname(),
                    }
                )
            yield instance_config, new_e2e_metadata
Example #8
def dd_environment():
    new_e2e_metadata = deepcopy(E2E_METADATA)
    with TempDir('snmp') as tmp_dir:
        data_dir = os.path.join(tmp_dir, 'data')
        env = {'DATA_DIR': data_dir}
        if not os.path.exists(data_dir):
            shutil.copytree(os.path.join(COMPOSE_DIR, 'data'), data_dir)
            for data_file in FILES:
                response = requests.get(data_file)
                with open(os.path.join(data_dir,
                                       data_file.rsplit('/', 1)[1]),
                          'wb') as output:
                    output.write(response.content)

        with docker_run(os.path.join(COMPOSE_DIR, 'docker-compose.yaml'),
                        env_vars=env,
                        log_patterns="Listening at"):
            if AUTODISCOVERY_TYPE == 'agent':
                instance_config = {}
                new_e2e_metadata['docker_volumes'] = [
                    '{}:/etc/datadog-agent/datadog.yaml'.format(
                        create_datadog_conf_file(tmp_dir))
                ]
            else:
                instance_config = generate_container_instance_config(
                    SCALAR_OBJECTS + SCALAR_OBJECTS_WITH_TAGS +
                    TABULAR_OBJECTS)
            yield instance_config, new_e2e_metadata
Example #9
def dd_environment():
    """
        Start the cassandra cluster with required configuration
    """
    env = os.environ
    compose_file = os.path.join(common.HERE, 'compose', 'docker-compose.yaml')
    env['CONTAINER_PORT'] = common.PORT

    # We need to restrict permission on the password file
    # Create a temporary file so if we have to run tests more than once on a machine
    # the original file's perms aren't modified
    with TempDir() as tmpdir:
        jmx_pass_file = os.path.join(common.HERE, "compose",
                                     'jmxremote.password')
        copy_path(jmx_pass_file, tmpdir)
        temp_jmx_file = os.path.join(tmpdir, 'jmxremote.password')
        env['JMX_PASS_FILE'] = temp_jmx_file
        os.chmod(temp_jmx_file, stat.S_IRWXU)
        with docker_run(compose_file,
                        service_name=common.CASSANDRA_CONTAINER_NAME,
                        log_patterns=['Listening for thrift clients']):
            cassandra_seed = get_container_ip("{}".format(
                common.CASSANDRA_CONTAINER_NAME))
            env['CASSANDRA_SEEDS'] = cassandra_seed
            with docker_run(compose_file,
                            service_name=common.CASSANDRA_CONTAINER_NAME_2,
                            log_patterns=['All sessions completed']):
                subprocess.check_call([
                    "docker", "exec", common.CASSANDRA_CONTAINER_NAME, "cqlsh",
                    "-e", "CREATE KEYSPACE IF NOT EXISTS test \
                    WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor':2}"
                ])
                yield common.CONFIG_INSTANCE, 'local'
Example #10
def dd_environment(config_e2e):
    logs_path = _mysql_logs_path()

    with TempDir('logs') as logs_host_path:
        e2e_metadata = {
            'docker_volumes': ['{}:{}'.format(logs_host_path, logs_path)]
        }

        with docker_run(
                os.path.join(common.HERE, 'compose', COMPOSE_FILE),
                env_vars={
                    'MYSQL_DOCKER_REPO': _mysql_docker_repo(),
                    'MYSQL_PORT': str(common.PORT),
                    'MYSQL_SLAVE_PORT': str(common.SLAVE_PORT),
                    'MYSQL_CONF_PATH': _mysql_conf_path(),
                    'MYSQL_LOGS_HOST_PATH': logs_host_path,
                    'MYSQL_LOGS_PATH': logs_path,
                    'WAIT_FOR_IT_SCRIPT_PATH': _wait_for_it_script(),
                },
                conditions=[
                    WaitFor(init_master, wait=2),
                    WaitFor(init_slave, wait=2),
                    CheckDockerLogs('mysql-slave', [
                        "ready for connections",
                        "mariadb successfully initialized"
                    ]),
                    populate_database,
                ],
        ):
            yield config_e2e, e2e_metadata
Example #11
def instance_ok():
    with TempDir() as d:
        yield {
            'reboot_signal_file': touch(join(d, 'reboot-required.freshly_minted')),
            'created_at_file': touch(join(d, 'reboot-required.created_at.freshly_minted')),
            'days_warning': 7,
            'days_critical': 14
        }
Example #12
def temp_binary(contents):
    with TempDir() as d:
        path = os.path.join(d, 'temp')

        with open(path, 'wb') as f:
            f.write(contents)

        yield path
Example #13
    def test_ca_cert_dir(self):
        with patch('ssl.SSLContext'), TempDir("test_ca_cert_file") as tmp_dir:
            instance = {'tls_ca_cert': tmp_dir}
            check = AgentCheck('test', {}, [instance])
            context = check.get_tls_context()  # type: MagicMock
            context.load_verify_locations.assert_called_with(
                cafile=None, capath=tmp_dir, cadata=None)
Example #14
    def test_ca_cert_file(self):
        with patch('ssl.SSLContext'), TempDir("test_ca_cert_file") as tmp_dir:
            filename = os.path.join(tmp_dir, 'foo')
            open(filename, 'w').close()
            instance = {'tls_ca_cert': filename}
            check = AgentCheck('test', {}, [instance])
            context = check.get_tls_context()  # type: MagicMock
            context.load_verify_locations.assert_called_with(
                cafile=filename, capath=None, cadata=None)
Example #15
def dd_agent_check(request, aggregator):
    if not e2e_testing():
        pytest.skip('Not running E2E tests')

    # Lazily import to reduce plugin load times for everyone
    from datadog_checks.dev import TempDir, run_command

    def run_check(config=None, **kwargs):
        root = os.path.dirname(request.module.__file__)
        while True:
            if os.path.isfile(os.path.join(root, 'setup.py')):
                check = os.path.basename(root)
                break

            new_root = os.path.dirname(root)
            if new_root == root:
                raise OSError('No Datadog Agent check found')

            root = new_root

        python_path = os.environ[E2E_PARENT_PYTHON]
        env = os.environ['TOX_ENV_NAME']

        check_command = [python_path, '-m', 'datadog_checks.dev', 'env', 'check', check, env, '--json']

        if config:
            config = format_config(config)
            config_file = os.path.join(
                temp_dir, '{}-{}-{}.json'.format(check, env, urlsafe_b64encode(os.urandom(6)).decode('utf-8'))
            )

            with open(config_file, 'wb') as f:
                output = json.dumps(config).encode('utf-8')
                f.write(output)
            check_command.extend(['--config', config_file])

        for key, value in kwargs.items():
            if value is not False:
                check_command.append('--{}'.format(key.replace('_', '-')))

                if value is not True:
                    check_command.append(str(value))

        result = run_command(check_command, capture=True)
        if AGENT_COLLECTOR_SEPARATOR not in result.stdout:
            raise ValueError(
                '{}{}\nCould not find `{}` in the output'.format(result.stdout, result.stderr, AGENT_COLLECTOR_SEPARATOR)
            )

        _, _, collector_output = result.stdout.partition(AGENT_COLLECTOR_SEPARATOR)
        collector = json.loads(collector_output.strip())

        replay_check_run(collector, aggregator)

        return aggregator

    # Give an explicit name so we don't shadow other uses
    with TempDir('dd_agent_check') as temp_dir:
        yield run_check
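A test would consume this fixture roughly as follows (a sketch; the instance config and metric name are hypothetical, and rate=True flows through the kwargs loop above as the --rate flag):

@pytest.mark.e2e
def test_check_e2e(dd_agent_check):
    aggregator = dd_agent_check({'instances': [{'host': 'localhost'}]}, rate=True)
    aggregator.assert_metric('my_check.some_metric')  # hypothetical metric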
Example #16
    def test_custom_path_precedence(self):
        with TempDir() as d:
            template_file = path_join(d, 'init_config', 'tags.yaml')
            ensure_parent_dir_exists(template_file)
            write_file(template_file, 'test:\n- foo\n- bar')

            templates = ConfigTemplates([d])

            assert templates.load('init_config/tags') == {'test': ['foo', 'bar']}
Example #17
def instance_critical():
    with TempDir() as d:
        yield {
            'reboot_signal_file': touch(join(d, 'reboot-required.critical')),
            'created_at_file': touch(
                join(d, 'reboot-required.created_at.critical'), times=(SIXTEEN_DAYS_AGO, SIXTEEN_DAYS_AGO)
            ),
            'days_warning': 7,
            'days_critical': 14
        }
Example #18
    def test_parse_error(self):
        with TempDir() as d:
            template_file = path_join(d, 'invalid.yaml')
            ensure_parent_dir_exists(template_file)
            write_file(template_file, '> invalid')

            templates = ConfigTemplates([d])

            with pytest.raises(ValueError, match='^Unable to parse template `{}`'.format(re.escape(template_file))):
                templates.load('invalid')
Example #19
def dd_environment():
    with TempDir("nagios_var_log") as rrd_path:
        e2e_metadata = deepcopy(E2E_METADATA)
        e2e_metadata['docker_volumes'] = ['{}:{}'.format(rrd_path, RRD_PATH)]

        with docker_run(
            conditions=[WaitFor(set_up_cacti)],
            compose_file=os.path.join(HERE, "compose", "docker-compose.yaml"),
            env_vars={'RRD_PATH': rrd_path},
            build=True,
        ):
            yield INSTANCE_INTEGRATION, e2e_metadata
Example #20
def legacy_environment():
    env = {}
    env['HAPROXY_CONFIG_DIR'] = os.path.join(HERE, 'compose')
    env['HAPROXY_CONFIG_OPEN'] = os.path.join(HERE, 'compose',
                                              'haproxy-open.cfg')
    with docker_run(
            compose_file=os.path.join(HERE, 'compose', 'haproxy.yaml'),
            env_vars=env,
            service_name="haproxy-open",
            conditions=[WaitFor(wait_for_haproxy_open)],
    ):

        if platform_supports_sockets:
            with TempDir() as temp_dir:
                host_socket_path = os.path.join(temp_dir,
                                                'datadog-haproxy-stats.sock')
                env['HAPROXY_CONFIG'] = os.path.join(HERE, 'compose',
                                                     'haproxy.cfg')
                haproxy_version = os.environ.get('HAPROXY_VERSION', '1.5.11')
                # compare numerically so that e.g. 1.10 sorts after 1.6
                if [int(p) for p in haproxy_version.split('.')[:2]] >= [1, 6]:
                    env['HAPROXY_CONFIG'] = os.path.join(
                        HERE, 'compose', 'haproxy-1_6.cfg')
                env['HAPROXY_SOCKET_DIR'] = temp_dir

                with docker_run(
                        compose_file=os.path.join(HERE, 'compose',
                                                  'haproxy.yaml'),
                        env_vars=env,
                        service_name="haproxy",
                        conditions=[WaitFor(wait_for_haproxy)],
                ):
                    try:
                        # on Linux the check needs access to the stats socket,
                        # so hand ownership of it to the current user
                        chown_args = []
                        user = getpass.getuser()

                        if user != 'root':
                            chown_args += ['sudo']
                        chown_args += ["chown", user, host_socket_path]
                        subprocess.check_call(chown_args, env=env)
                    except subprocess.CalledProcessError:
                        # it's not always bad if this fails
                        pass
                    config = deepcopy(CHECK_CONFIG)
                    unixsocket_url = 'unix://{0}'.format(host_socket_path)
                    config['unixsocket_url'] = unixsocket_url
                    yield {'instances': [config, CONFIG_TCPSOCKET]}
        else:
            yield deepcopy(CHECK_CONFIG_OPEN)
Example #21
def dd_environment():
    with TempDir('log') as log_dir:
        with docker_run(
                os.path.join(get_here(), 'compose', 'docker-compose.yml'),
                env_vars={'LOG_DIR': log_dir},
                conditions=[WaitFor(setup_ignite)],
                log_patterns="Ignite node started OK",
        ):
            instance = load_jmx_config()
            instance['instances'][0]['port'] = 49112
            instance['instances'][0]['host'] = get_docker_hostname()
            metadata = E2E_METADATA.copy()
            metadata['docker_volumes'] = ['{}:/var/log/ignite'.format(log_dir)]
            yield instance, metadata
Example #22
def dd_environment():
    with TempDir('snmprec') as tmp_dir:
        data_dir = os.path.join(tmp_dir, 'data')
        env = {'DATA_DIR': data_dir}
        if not os.path.exists(data_dir):
            shutil.copytree(os.path.join(COMPOSE_DIR, 'data'), data_dir)
            for data_file in FILES:
                response = requests.get(data_file)
                with open(os.path.join(data_dir, data_file.rsplit('/', 1)[1]), 'wb') as output:
                    output.write(response.content)

        with docker_run(os.path.join(COMPOSE_DIR, 'docker-compose.yaml'), env_vars=env, log_patterns="Listening at"):
            yield generate_container_instance_config(
                SCALAR_OBJECTS + SCALAR_OBJECTS_WITH_TAGS + TABULAR_OBJECTS
            ), E2E_METADATA
Example #23
def cassandra_cluster():
    """
        Start the cassandra cluster with required configuration
    """
    env = os.environ
    env['CONTAINER_PORT'] = common.PORT

    # We need to restrict permission on the password file
    # Create a temporary file so if we have to run tests more than once on a machine
    # the original file's perms aren't modified
    with TempDir() as tmpdir:
        jmx_pass_file = os.path.join(common.HERE, "compose",
                                     'jmxremote.password')
        copy_path(jmx_pass_file, tmpdir)
        temp_jmx_file = os.path.join(tmpdir, 'jmxremote.password')
        env['JMX_PASS_FILE'] = temp_jmx_file
        os.chmod(temp_jmx_file, stat.S_IRWXU)
        docker_compose_args = [
            "docker-compose", "-f",
            os.path.join(common.HERE, 'compose', 'docker-compose.yaml')
        ]
        subprocess.check_call(docker_compose_args +
                              ["up", "-d", common.CASSANDRA_CONTAINER_NAME])
        # wait for the cluster to be up before yielding
        if not wait_on_docker_logs(common.CASSANDRA_CONTAINER_NAME, 20, [
                'Listening for thrift clients',
                "Created default superuser role 'cassandra'"
        ]):
            raise Exception(
                "Cassandra cluster dd-test-cassandra boot timed out!")
        cassandra_seed = get_container_ip("{}".format(
            common.CASSANDRA_CONTAINER_NAME))
        env['CASSANDRA_SEEDS'] = cassandra_seed.decode('utf-8')
        subprocess.check_call(docker_compose_args +
                              ["up", "-d", common.CASSANDRA_CONTAINER_NAME_2])
        if not wait_on_docker_logs(common.CASSANDRA_CONTAINER_NAME_2, 50, [
                'Listening for thrift clients',
                'Not starting RPC server as requested'
        ]):
            raise Exception("Cassandra cluster {} boot timed out!".format(
                common.CASSANDRA_CONTAINER_NAME_2))
        subprocess.check_call([
            "docker", "exec", common.CASSANDRA_CONTAINER_NAME, "cqlsh", "-e",
            "CREATE KEYSPACE IF NOT EXISTS test WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor':2}"
        ])
        yield

    subprocess.check_call(docker_compose_args + ["down"])
Example #24
def dd_environment():
    nagios_conf = os.path.join(HERE, 'compose', 'nagios4', 'nagios.cfg')
    with TempDir("nagios_var_log") as nagios_var_log:
        e2e_metadata = {
            'docker_volumes': [
                '{}:/opt/nagios/etc/nagios.cfg'.format(nagios_conf),
                '{}:/opt/nagios/var/log/'.format(nagios_var_log),
            ]
        }

        with docker_run(
                os.path.join(HERE, 'compose', 'docker-compose.yaml'),
                env_vars={'NAGIOS_LOGS_PATH': nagios_var_log},
                build=True,
        ):
            yield INSTANCE_INTEGRATION, e2e_metadata
Example #25
def dd_environment():
    with TempDir() as d:
        host_socket_path = os.path.join(d, 'ldapi')

        if not file_exists(host_socket_path):
            os.chmod(d, 0o770)
            create_file(host_socket_path)
            os.chmod(host_socket_path, 0o640)

        with docker_run(
                compose_file=os.path.join(HERE, 'compose',
                                          'docker-compose.yaml'),
                env_vars={'HOST_SOCKET_DIR': d},
                log_patterns='slapd starting',
        ):
            yield DEFAULT_INSTANCE
Example #26
def certs(dd_environment):
    downloads = {'https://valid.mock': 'valid.pem', 'https://expired.mock': 'expired.pem'}
    raw_downloads = {
        'https://valid.mock': 'valid.crt',
    }
    certs = {}
    with TempDir('certs') as tmp_dir:
        for address, name in iteritems(downloads):
            filepath = os.path.join(tmp_dir, name)
            download_cert(filepath, address)
            certs[name] = filepath
        for address, name in iteritems(raw_downloads):
            filepath = os.path.join(tmp_dir, name)
            download_cert(filepath, address, raw=True)
            certs[name] = filepath
        yield certs
Example #27
def uds_path():
    if Platform.is_mac():
        # See: https://github.com/docker/for-mac/issues/483
        pytest.skip('Sharing Unix sockets is not supported by Docker for Mac.')

    with TempDir() as tmp_dir:
        compose_file = os.path.join(HERE, 'compose', 'uds.yaml')
        uds_filename = 'tmp.sock'
        uds_path = os.path.join(tmp_dir, uds_filename)
        with docker_run(
                compose_file=compose_file,
                env_vars={
                    "UDS_HOST_DIRECTORY": tmp_dir,
                    'UDS_FILENAME': uds_filename,
                },
        ):
            yield uds_path
Example #28
def dd_environment():
    with TempDir('log') as log_dir:
        docker_volumes = ['{}:/var/log/ignite'.format(log_dir)]
        conditions = []
        jvm_opts = ''

        if common.IS_PRE_2_9:
            # Activate JMX through 'control.sh' and functions made available to 'ignite.sh'.
            functions_sh = os.path.join(common.HERE, 'compose', 'functions.sh')
            docker_volumes.append(
                '{}:/opt/ignite/apache-ignite/bin/include/functions.sh'.format(
                    functions_sh))
            conditions.append(WaitFor(control_sh_activate))
        else:
            # On 2.9.0 and above, the Ignite Docker image calls the JVM directly,
            # so JMX configuration should be set via JVM options.
            # See: https://ignite.apache.org/docs/latest/installation/installing-using-docker
            jvm_opts = ('-Dcom.sun.management.jmxremote '
                        '-Dcom.sun.management.jmxremote.port=49112 '
                        '-Dcom.sun.management.jmxremote.rmi.port=49112 '
                        '-Dcom.sun.management.jmxremote.authenticate=false '
                        '-Dcom.sun.management.jmxremote.ssl=false')

        env_vars = {
            'IGNITE_IMAGE': common.IGNITE_IMAGE,
            'JVM_OPTS': jvm_opts,
            'LOG_DIR': log_dir,
        }

        with docker_run(
                os.path.join(get_here(), 'compose', 'docker-compose.yml'),
                env_vars=env_vars,
                conditions=conditions,
                log_patterns="Ignite node started OK",
                attempts=2,
        ):
            instance = load_jmx_config()
            instance['instances'][0]['port'] = 49112
            instance['instances'][0]['host'] = get_docker_hostname()
            metadata = E2E_METADATA.copy()
            metadata['docker_volumes'] = docker_volumes
            yield instance, metadata
Example #29
def kerberos_agent():

    with TempDir() as tmp_dir:
        shared_volume = os.path.join(tmp_dir, "shared-volume")
        compose_file = os.path.join(HERE, "compose", "kerberos-agent.yaml")
        realm = "EXAMPLE.COM"
        svc = "HTTP"
        webserver_hostname = "web.example.com"
        webserver_port = "8080"
        krb5_conf = os.path.join(HERE, "fixtures", "kerberos", "krb5.conf")

        common_config = {
            "url": "http://*****:*****@{}".format(realm),
            "realm": realm,
            "svc": svc,
            "hostname": webserver_hostname,
            # these keys are referenced below but their original values were
            # lost from the snippet; the paths here are assumptions
            "keytab": os.path.join(shared_volume, "http.keytab"),
            "cache": os.path.join(tmp_dir, "krb5ccache"),
            "tmp_dir": tmp_dir,
            "dd_api_key": os.getenv('DD_API_KEY'),
        }

        with docker_run(
                compose_file=compose_file,
                env_vars={
                    'SHARED_VOLUME': shared_volume,
                    'KRB5_CONFIG': krb5_conf,
                    'KRB5_KEYTAB': common_config['keytab'],
                    'KRB5_CCNAME': common_config['cache'],
                    'KRB5_REALM': common_config['realm'],
                    'KRB5_SVC': common_config['svc'],
                    'WEBHOST': common_config['hostname'],
                    'WEBPORT': webserver_port,
                    'DD_API_KEY': common_config['dd_api_key'],
                },
                conditions=[CheckDockerLogs(compose_file, "ReadyToConnect")],
        ):
            yield common_config
Example #30
def dd_environment():
    compose_file = os.path.join(common.HERE, 'docker', 'docker-compose.yaml')

    # The scanner creates artifacts within the project such as `.scannerwork/`
    with TempDir('sonarqube-project') as temp_dir:
        project_dir = os.path.join(temp_dir, 'project')
        if not os.path.isdir(project_dir):
            shutil.copytree(os.path.join(common.HERE, 'docker', 'project'),
                            project_dir)

        with docker_run(
                compose_file,
                service_name='sonarqube',
                env_vars={'PROJECT_DIR': project_dir},
                conditions=[
                    CheckDockerLogs('sonarqube', ['SonarQube is up'],
                                    attempts=100,
                                    wait=3),
                    CheckEndpoints([common.WEB_INSTANCE['web_endpoint']]),
                ],
                mount_logs=True,
        ):
            with docker_run(
                    compose_file,
                    service_name='sonar-scanner',
                    env_vars={'PROJECT_DIR': project_dir},
                    conditions=[
                        CheckDockerLogs('sonar-scanner',
                                        ['ANALYSIS SUCCESSFUL'],
                                        attempts=100,
                                        wait=3)
                    ],
                    sleep=10,
                    # Don't worry about spinning down since the outermost runner will already do that
                    down=lambda: None,
            ):
                yield common.CHECK_CONFIG, {'use_jmx': True}
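All of the examples above lean on datadog_checks.dev.TempDir, a context manager that yields a temporary directory and deletes it afterwards. A minimal sketch of the idea, an approximation rather than the library's actual implementation:

import os
import shutil
import tempfile
from contextlib import contextmanager

@contextmanager
def TempDir(name='default'):
    # realpath sidesteps the symlinked /var -> /private/var mount issue
    # on macOS noted in the comment in Example #5
    path = os.path.realpath(tempfile.mkdtemp(prefix='{}-'.format(name)))
    try:
        yield path
    finally:
        shutil.rmtree(path, ignore_errors=True)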