def mock_e2e_agent(check_name, hosts):
    """
    Only useful in E2E tests: this helper finds the relevant Agent container and updates its hosts file.
    No per-host mapping is done; every hostname in 'hosts' is redirected to localhost, with no port redirection.
    :param check_name: The name of the current check, used to determine the agent container name.
    :param hosts: The list of hosts to redirect to localhost
    """
    container_id = "dd_{}_{}".format(check_name, os.environ["TOX_ENV_NAME"])
    commands = []
    for host in hosts:
        commands.append(
            r'bash -c "printf \"127.0.0.1 {}\n\" >> /etc/hosts"'.format(host))

    for command in commands:
        run_command('docker exec {} {}'.format(container_id, command))

    yield

    # /etc/hosts is a bind mount inside the container, so `sed -i` can't replace it
    # in place; edit a copy and copy it back instead.
    commands = ['cp /etc/hosts /hosts.new']
    for host in hosts:
        commands.append(
            r'bash -c "sed -i \"/127.0.0.1 {}/d\" /hosts.new"'.format(host))
    commands.append('cp -f /hosts.new /etc/hosts')
    for command in commands:
        run_command('docker exec {} {}'.format(container_id, command))
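A minimal sketch of how a generator like this could be wired up as a pytest fixture (the fixture name, check name, and hostnames below are illustrative, not part of the example above):

import pytest

@pytest.fixture(scope='session')
def mock_agent_hosts():
    # Hypothetical wiring: delegate setup and teardown to the generator above
    yield from mock_e2e_agent('redis', ['redis-primary', 'redis-replica'])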
Example 2
    def __call__(self):
        # First wait for the api to be available
        super(WaitAndUnsealVault, self).__call__()

        # Then unseal the vault
        result = run_command("docker exec vault-leader vault operator init",
                             capture=True)
        if result.stderr:
            raise Exception(result.stderr)
        result = result.stdout.split('\n')
        # `vault operator init` prints the unseal keys first; three meet the default threshold
        keys = [line.split(':')[1].strip() for line in result[:3]]
        for k in keys:
            err = run_command(
                "docker exec vault-leader vault operator unseal {}".format(k),
                capture=True).stderr
            if err:
                raise Exception("Can't unseal vault-leader. \n{}".format(err))
            err = run_command(
                "docker exec vault-replica vault operator unseal {}".format(k),
                capture=True).stderr
            if err:
                raise Exception("Can't unseal vault-replica. \n{}".format(err))

        root_token = [line for line in result if 'Initial Root Token' in line]
        if not root_token:
            raise Exception("Can't find root token in vault output")
        self.root_token = root_token[0].split(':')[1].strip()

        return True
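For context, the super().__call__() above delegates to a wait-for-API condition provided by the test helpers. A minimal stand-in for such a base class might look like this (illustrative sketch, not the actual helper):

import time
import requests

class WaitForAPI(object):
    def __init__(self, url, attempts=60):
        self.url = url
        self.attempts = attempts

    def __call__(self):
        # Poll until the endpoint answers, or give up after `attempts` seconds
        for _ in range(self.attempts):
            try:
                requests.get(self.url, timeout=1)
                return True
            except requests.RequestException:
                time.sleep(1)
        raise Exception('API at {} never became available'.format(self.url))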
Example 3
def generate_config_with_creds():
    access_id = run_command(
        [
            "docker",
            "exec",
            "dd-test-riakcs",
            "bash",
            "-c",
            "grep admin_key /etc/riak-cs/advanced.config | cut -d '\"' -f2",
        ],
        capture="out",
    ).stdout.strip()
    access_secret = run_command(
        [
            "docker",
            "exec",
            "dd-test-riakcs",
            "bash",
            "-c",
            "grep admin_secret /etc/riak-cs/advanced.config | cut -d '\"' -f2",
        ],
        capture="out",
    ).stdout.strip()

    config = copy.deepcopy(CONFIG_21)
    config["access_id"] = access_id
    config["access_secret"] = access_secret
    config["is_secure"] = False
    config["s3_root"] = "s3.amazonaws.dev"

    return config
Example 4
    def __call__(self):
        # First wait for the api to be available
        super(WaitAndUnsealVault, self).__call__()

        # Then unseal the vault
        result = run_command("docker exec vault-leader vault operator init", capture=True)
        if result.stderr:
            raise Exception(result.stderr)
        result = result.stdout.split('\n')
        keys = [line.split(':')[1].strip() for line in result[:3]]
        for k in keys:
            err = run_command("docker exec vault-leader vault operator unseal {}".format(k), capture=True).stderr
            if err:
                raise Exception("Can't unseal vault-leader. \n{}".format(err))
            err = run_command("docker exec vault-replica vault operator unseal {}".format(k), capture=True).stderr
            if err:
                raise Exception("Can't unseal vault-replica. \n{}".format(err))

        root_token = [line for line in result if 'Initial Root Token' in line]
        if not root_token:
            raise Exception("Can't find root token in vault output")
        root_token = root_token[0].split(':')[1].strip()

        # Set up auto-auth
        for command in (
            'login {}'.format(root_token),
            'policy write metrics /home/metrics_policy.hcl',
            'audit enable file file_path=/vault/vault-audit.log',
            'auth enable jwt',
            'write auth/jwt/config jwt_supported_algs=RS256 jwt_validation_pubkeys=@/home/pub.pem',
            'write auth/jwt/role/datadog role_type=jwt bound_audiences=test user_claim=name token_policies=metrics',
            'agent -config=/home/agent_config.hcl',
        ):
            time.sleep(2)
            run_command('docker exec vault-leader vault {}'.format(command), capture=True, check=True)
Example 5
def set_up_cacti():
    commands = [
        ['/sbin/restore'],
        ['mysql', '-u', 'root', '-e', "flush privileges;"],
        ['php', '/opt/cacti/lib/poller.php', '--force'],
    ]
    for c in commands:
        command = ['docker', 'exec', CONTAINER_NAME] + c
        run_command(command, capture=True, check=True)
Example 6
    def __call__(self):
        if not ON_WINDOWS:
            user = getpass.getuser()
            chown_args = ['chown', user, self.token_file]

            if user != 'root':
                chown_args.insert(0, 'sudo')

            run_command(chown_args, check=True)
Example 7
def test_followers(aggregator, spin_up_etcd):
    compose_file = spin_up_etcd

    urls = []
    result = run_command('docker-compose -f {} ps -q'.format(compose_file),
                         capture='out',
                         check=True)
    container_ids = result.stdout.splitlines()

    for container_id in container_ids:
        result = run_command('docker port {} 2379/tcp'.format(container_id),
                             capture='out',
                             check=True)
        port = result.stdout.strip().split(':')[-1]
        urls.append('http://{}:{}'.format(HOST, port))

    # Find the leader; the for/else raises only if no URL broke out of the loop
    for url in urls:
        if is_leader(url):
            break
    else:
        raise Exception('No leader found')

    response = requests.get('{}/v2/stats/leader'.format(url))
    followers = list(response.json().get('followers', {}).keys())

    instance = {'url': url}
    check = Etcd(CHECK_NAME, None, {}, [instance])
    check.check(instance)

    common_leader_tags = ['url:{}'.format(url), 'etcd_state:leader']
    follower_tags = [
        common_leader_tags + ['follower:{}'.format(followers[0])],
        common_leader_tags + ['follower:{}'.format(followers[1])],
    ]

    for fol_tags in follower_tags:
        aggregator.assert_metric('etcd.leader.counts.fail',
                                 count=1,
                                 tags=fol_tags)
        aggregator.assert_metric('etcd.leader.counts.success',
                                 count=1,
                                 tags=fol_tags)
        aggregator.assert_metric('etcd.leader.latency.avg',
                                 count=1,
                                 tags=fol_tags)
        aggregator.assert_metric('etcd.leader.latency.min',
                                 count=1,
                                 tags=fol_tags)
        aggregator.assert_metric('etcd.leader.latency.max',
                                 count=1,
                                 tags=fol_tags)
        aggregator.assert_metric('etcd.leader.latency.stddev',
                                 count=1,
                                 tags=fol_tags)
        aggregator.assert_metric('etcd.leader.latency.current',
                                 count=1,
                                 tags=fol_tags)
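The is_leader helper is not shown in this example. Against the etcd v2 API it could be as small as the following sketch (illustrative; relies on the member's self-stats endpoint):

def is_leader(url):
    # etcd v2 reports the member's role in its self-stats
    response = requests.get('{}/v2/stats/self'.format(url))
    return response.json().get('state') == 'StateLeader'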
Example 8
def mock_hosts_e2e():
    """Only for e2e testing"""
    container_id = "dd_kafka_consumer_{}".format(os.environ["TOX_ENV_NAME"])
    commands = []
    for mocked_host in ['kafka1', 'kafka2']:
        commands.append(r'bash -c "printf \"127.0.0.1 {}\n\" >> /etc/hosts"'.format(mocked_host))

    for command in commands:
        run_command('docker exec {} {}'.format(container_id, command))
Example 9
def setup_linkerd_cluster():
    clusters = run_command(["kind", "get", "clusters"], capture='out')
    cluster = [c for c in clusters.stdout.split() if 'linkerd' in c][0]
    result = run_command(
        ["kind", "get", "kubeconfig", "--internal", "--name", cluster],
        capture='out',
        check=True,
    )
    with open('/tmp/kubeconfig.yaml', 'w') as f:
        f.write(result.stdout)
    # shell=True with a list argument would only execute `cat` itself; the plain list form is correct here
    run_command(['cat', '/tmp/kubeconfig.yaml'], check=True)
Example 10
def test_new_check_test():
    check_path = os.path.join(CORE_ROOT, 'my_check')

    try:
        run_command(
            [sys.executable, '-m', 'datadog_checks.dev', 'create', '-q', '-l', CORE_ROOT, 'my-check'],
            capture=True,
            check=True
        )
        run_command(
            [sys.executable, '-m', 'pip', 'install', check_path],
            capture=True,
            check=True
        )

        with chdir(check_path):
            with EnvVars(ignore=[TESTING_PLUGIN]):
                run_command([sys.executable, '-m', 'pytest'], capture=True, check=True)

        run_command(
            [sys.executable, '-m', 'pip', 'uninstall', '-y', 'my-check'],
            capture=True,
            check=True
        )
    finally:
        remove_path(check_path)
Example 11
def test_new_check_test(integration_type, installable):
    check_path = os.path.join(CORE_ROOT, 'my_check')

    try:
        run_command(
            [
                sys.executable,
                '-m',
                'datadog_checks.dev',
                'create',
                '--type',
                integration_type,
                '--quiet',
                '--location',
                CORE_ROOT,
                'My Check',
            ],
            capture=True,
            check=True,
        )
        if installable:
            run_command([sys.executable, '-m', 'pip', 'install', check_path],
                        capture=True,
                        check=True)

            with chdir(check_path):
                ignored_env_vars = [TESTING_PLUGIN, 'PYTEST_ADDOPTS']
                ignored_env_vars.extend(ev for ev in os.environ
                                        if ev.startswith(E2E_PREFIX))

                with EnvVars(ignore=ignored_env_vars):
                    run_command([sys.executable, '-m', 'pytest'],
                                capture=True,
                                check=True)

            # We only run style checks on the generated integration. Running the entire test
            # suite would make tox create Python environments, which is too slow for little benefit.
            result = run_command([
                sys.executable, '-m', 'datadog_checks.dev', 'test', '-s',
                'my_check'
            ],
                                 capture=True,
                                 check=True)
            # `ddev test` will not fail if the provided check name doesn't correspond to an existing integration.
            # Instead, it will log a message. So we test for that message to verify style checks ran at all.
            assert 'Nothing to test!' not in result.stdout

            result = run_command([
                sys.executable, '-m', 'pip', 'uninstall', '-y',
                'datadog-my-check'
            ],
                                 capture=True,
                                 check=True)
            # `pip uninstall` is idempotent, so it does not fail if `check_package_name` is incorrect
            # (i.e. the package could not be found); it only logs a warning. We test for that warning
            # to verify the package was actually uninstalled.
            # See: https://github.com/pypa/pip/issues/3016
            assert 'WARNING: Skipping' not in result.stdout
    finally:
        remove_path(check_path)
Example 12
def test_new_check_test():
    check_path = os.path.join(CORE_ROOT, 'my_check')

    try:
        run_command(
            [
                sys.executable, '-m', 'datadog_checks.dev', 'create', '-q',
                '-l', CORE_ROOT, 'my-check'
            ],
            capture=True,
            check=True,
        )
        run_command([sys.executable, '-m', 'pip', 'install', check_path],
                    capture=True,
                    check=True)

        with chdir(check_path):
            ignored_env_vars = [TESTING_PLUGIN]
            ignored_env_vars.extend(ev for ev in os.environ
                                    if ev.startswith(E2E_PREFIX))

            with EnvVars(ignore=ignored_env_vars):
                run_command([sys.executable, '-m', 'pytest'],
                            capture=True,
                            check=True)

        run_command(
            [sys.executable, '-m', 'pip', 'uninstall', '-y', 'my-check'],
            capture=True,
            check=True)
    finally:
        remove_path(check_path)
Example 13
def setup_sharding(compose_file):
    service_commands = [
        ('config01', 'mongo --port 27017 < /scripts/init-configserver.js'),
        ('shard01a', 'mongo --port 27018 < /scripts/init-shard01.js'),
        ('shard02a', 'mongo --port 27019 < /scripts/init-shard02.js'),
        ('shard03a', 'mongo --port 27020 < /scripts/init-shard03.js'),
        ('router', 'mongo < /scripts/init-router.js'),
    ]

    for i, (service, command) in enumerate(service_commands, 1):
        # Wait before router init
        if i == len(service_commands):
            time.sleep(20)

        run_command(['docker-compose', '-f', compose_file, 'exec', '-T', service, 'sh', '-c', command], check=True)
Example 14
    def run_check(config=None, **kwargs):
        root = os.path.dirname(request.module.__file__)
        # Walk up from the test module until setup.py marks the check's root directory
        while True:
            if os.path.isfile(os.path.join(root, 'setup.py')):
                check = os.path.basename(root)
                break

            new_root = os.path.dirname(root)
            if new_root == root:
                raise OSError('No Datadog Agent check found')

            root = new_root

        python_path = os.environ[E2E_PARENT_PYTHON]
        env = os.environ['TOX_ENV_NAME']

        check_command = [
            python_path, '-m', 'datadog_checks.dev', 'env', 'check', check,
            env, '--json'
        ]

        if config:
            config = format_config(config)
            config_file = os.path.join(
                temp_dir,
                '{}-{}-{}.json'.format(check, env,
                                       urlsafe_b64encode(os.urandom(6))))

            with open(config_file, 'wb') as f:
                output = json.dumps(config).encode('utf-8')
                f.write(output)
            check_command.extend(['--config', config_file])

        for key, value in kwargs.items():
            if value is not False:
                check_command.append('--{}'.format(key.replace('_', '-')))

                if value is not True:
                    check_command.append(str(value))

        result = run_command(check_command, capture=True)

        matches = re.findall(
            AGENT_COLLECTOR_SEPARATOR + r'\n(.*?\n(?:\} \]|\]))',
            result.stdout, re.DOTALL)

        if not matches:
            raise ValueError('{}{}\nCould not find `{}` in the output'.format(
                result.stdout, result.stderr, AGENT_COLLECTOR_SEPARATOR))

        for raw_json in matches:
            try:
                collector = json.loads(raw_json)
            except Exception as e:
                raise Exception(
                    "Error loading json: {}\nCollector Json Output:\n{}".
                    format(e, raw_json))
            replay_check_run(collector, aggregator, datadog_agent)

        return aggregator
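The kwargs loop above encodes a small convention: a value of True yields a bare flag, False drops the flag entirely, and anything else is appended as the flag's argument. The same logic in isolation (helper name is illustrative):

def to_cli_flags(**kwargs):
    flags = []
    for key, value in kwargs.items():
        if value is not False:
            flags.append('--{}'.format(key.replace('_', '-')))
            if value is not True:
                flags.append(str(value))
    return flags

# to_cli_flags(json=True, rate=False, breakpoint=0) -> ['--json', '--breakpoint', '0']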
Example 15
def dd_environment():
    compose_file = os.path.join(HERE, 'compose', 'docker-compose.yaml')
    # We need a custom condition to wait a bit longer
    with docker_run(
        compose_file=compose_file,
        conditions=[
            CheckDockerLogs(compose_file, 'spawning ceph --cluster ceph -w', wait=5),
            CheckDockerLogs(compose_file, 'Running on http://0.0.0.0:5000/'),
        ],
    ):
        # Silence the disk space warning
        run_command(
            ['docker', 'exec', 'dd-test-ceph', 'ceph', 'tell', 'mon.*', 'injectargs', '--mon_data_avail_warn', '5']
        )
        # Wait a bit for the change to take effect
        time.sleep(5)
        yield BASIC_CONFIG, E2E_METADATA
Example 16
    def run_check(config=None, **kwargs):
        root = os.path.dirname(request.module.__file__)
        while True:
            if os.path.isfile(os.path.join(root, 'setup.py')):
                check = os.path.basename(root)
                break

            new_root = os.path.dirname(root)
            if new_root == root:
                raise OSError('No Datadog Agent check found')

            root = new_root

        python_path = os.environ[E2E_PARENT_PYTHON]
        env = os.environ['TOX_ENV_NAME']

        check_command = [
            python_path, '-m', 'datadog_checks.dev', 'env', 'check', check,
            env, '--json'
        ]

        if config:
            config = format_config(config)
            config_file = os.path.join(
                temp_dir,
                '{}-{}-{}.json'.format(check, env,
                                       urlsafe_b64encode(os.urandom(6))))

            with open(config_file, 'wb') as f:
                output = json.dumps(config).encode('utf-8')
                f.write(output)
            check_command.extend(['--config', config_file])

        for key, value in kwargs.items():
            if value is not False:
                check_command.append('--{}'.format(key.replace('_', '-')))

                if value is not True:
                    check_command.append(str(value))

        result = run_command(check_command, capture=True)
        if AGENT_COLLECTOR_SEPARATOR not in result.stdout:
            raise ValueError('{}{}\nCould not find `{}` in the output'.format(
                result.stdout, result.stderr, AGENT_COLLECTOR_SEPARATOR))

        _, _, collector_output = result.stdout.partition(AGENT_COLLECTOR_SEPARATOR)
        collector_output = collector_output.strip()
        if not collector_output.endswith(']'):
            # JMX needs some additional cleanup: truncate trailing text after the last `]`
            collector_output = collector_output[:collector_output.rfind(']') + 1]
        collector = json.loads(collector_output)

        replay_check_run(collector, aggregator)

        return aggregator
Example 17
def create_database(tls=False):
    if tls:
        status_command = (
            'docker exec fdb-coordinator fdbcli -C /var/fdb/fdb.cluster --tls_certificate_file '
            '/var/fdb/fdb.pem --tls_key_file /var/fdb/private.key --tls_verify_peers Check.Valid=0 '
            '--exec "status json"')
    else:
        status_command = 'docker exec fdb-0 fdbcli --exec "status json"'
    base_status = run_command(status_command, capture=True, check=True)
    status = json.loads(base_status.stdout)
    if not status.get('client').get('database_status').get('available'):
        if tls:
            command = (
                'docker exec fdb-coordinator fdbcli -C /var/fdb/fdb.cluster --tls_certificate_file '
                '/var/fdb/fdb.pem --tls_key_file /var/fdb/private.key --tls_verify_peers Check.Valid=0 '
                '--exec "configure new single memory"')
        else:
            command = 'docker exec fdb-0 fdbcli --exec "configure new single memory"'
        run_command(command, capture=True, check=True)
    i = 0
    is_healthy = False
    has_latency_stats = False
    # Wait for 1 minute for the database to become available for testing
    while i < 60 and not (is_healthy and has_latency_stats):
        time.sleep(1)
        base_status = run_command(status_command, capture=True, check=True)
        status = json.loads(base_status.stdout)
        is_healthy = status.get('cluster').get('data').get('state').get('name') == 'healthy'
        has_latency_stats = False
        for _, process in status.get('cluster').get('processes').items():
            for role in process.get('roles'):
                if "commit_latency_statistics" in role:
                    has_latency_stats = True
        i += 1
    if not tls:
        test_data_fill_command = (
            'docker exec fdb-0 fdbcli --exec "writemode on; set basket_size 10; set temperature 37; writemode off"'
        )
        data_committed = run_command(test_data_fill_command,
                                     capture=True,
                                     check=True)
        assert 'Committed' in data_committed.stdout
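For reference, the health check above relies only on this subset of the `status json` document (abridged, illustrative shape; not real fdbcli output):

status = {
    'client': {'database_status': {'available': True}},
    'cluster': {
        'data': {'state': {'name': 'healthy'}},
        'processes': {
            'some-process-id': {'roles': [{'role': 'proxy', 'commit_latency_statistics': {}}]},
        },
    },
}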
Example 18
def create_volume():
    run_command("docker exec gluster-node-2 mkdir -p /export-test",
                capture=True,
                check=True)

    for command in (
            'gluster peer probe gluster-node-2',
            'mkdir -p /export-test',
            'gluster volume create gv0 replica 2 gluster-node-1:/export-test gluster-node-2:/export-test force',
            'gluster volume start gv0',
            'yum update -y',
            'yum install -y python3',
            'curl -LO https://github.com/gluster/gstatus/releases/download/v1.0.5/gstatus',
            'chmod +x ./gstatus',
            'mv ./gstatus /usr/local/bin/gstatus',
    ):
        run_command("docker exec gluster-node-1 {}".format(command),
                    capture=True,
                    check=True)
        time.sleep(10)
Example 19
def setup_cilium():
    config = os.path.join(HERE, 'kind', 'cilium.yaml')
    run_command(
        [
            "kubectl",
            "create",
            "clusterrolebinding",
            "cluster-admin-binding",
            "--clusterrole",
            "cluster-admin",
            "--user",
            "*****@*****.**",
        ]
    )
    run_command(["kubectl", "create", "ns", "cilium"])
    run_command(["kubectl", "create", "-f", config])
    run_command(
        ["kubectl", "wait", "deployments", "--all", "--for=condition=Available", "-n", "cilium", "--timeout=300s"]
    )
    run_command(["kubectl", "wait", "pods", "-n", "cilium", "--all", "--for=condition=Ready", "--timeout=300s"])
Example 20
def _autodiscovery_ready():
    result = run_command(
        ['docker', 'exec', 'dd_snmp_{}'.format(TOX_ENV_NAME), 'agent', 'configcheck'], capture=True, check=True
    )

    autodiscovery_checks = []
    for result_line in result.stdout.splitlines():
        if 'autodiscovery_subnet' in result_line:
            autodiscovery_checks.append(result_line)

    # Assert the number of subnets discovered via the `snmp_listener` config in datadog.yaml
    expected_autodiscovery_checks = 5
    assert len(autodiscovery_checks) == expected_autodiscovery_checks
Example 21
def setup_mapreduce():
    # Run a job in order to get metrics from the environment
    outputdir = 'output{}'.format(random.randrange(1, 1000000))
    command = (
        r'bash -c "hadoop fs -mkdir -p input ; '
        'hdfs dfs -put -f $(find /etc/hadoop/ -type f) input && '
        'hadoop jar opt/hadoop-3.2.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.2.1.jar grep input {} '
        '\'dfs[a-z.]+\' &"'.format(outputdir))
    cmd = 'docker exec {} {}'.format(CONTAINER_NAME, command)
    run_command(cmd)

    # Called in WaitFor which catches initial exceptions when containers aren't ready
    for _ in range(15):
        r = requests.get("{}/ws/v1/cluster/apps?states=RUNNING".format(
            INSTANCE_INTEGRATION['resourcemanager_uri']))
        res = r.json()
        if res.get("apps", None) is not None and res.get("apps"):
            return True

        time.sleep(1)

    # nothing started after 15 seconds
    return False
Example 22
def dd_environment():
    compose_file = os.path.join(get_here(), 'compose', 'docker-compose.yaml')

    # Build the topology jar to use in the environment
    with docker_run(compose_file,
                    build=True,
                    service_name='topology-maker',
                    sleep=15):
        run_command([
            'docker', 'cp', 'topology-build:/topology.jar',
            os.path.join(get_here(), 'compose')
        ])
    nimbus_condition = WaitFor(wait_for_thrift)
    with docker_run(compose_file,
                    service_name='storm-nimbus',
                    conditions=[nimbus_condition]):
        with docker_run(compose_file,
                        service_name='storm-ui',
                        log_patterns=[r'org.apache.storm.ui.core']):
            with docker_run(
                    compose_file,
                    service_name='topology',
                    log_patterns=['Finished submitting topology: topology']):
                yield INSTANCE
Example 23
def test_validate_package_validates_emails(authors, expected_exit_code, expected_output):
    runner = CliRunner()

    with runner.isolated_filesystem():
        os.mkdir('my_check')

        with open('my_check/pyproject.toml', 'w') as f:
            f.write(_build_pyproject_file(authors))

        os.makedirs('my_check/datadog_checks/my_check')
        with open('my_check/datadog_checks/my_check/__about__.py', 'w') as f:
            f.write('__version__ = "1.0.0"')

        result = run_command(
            [sys.executable, '-m', 'datadog_checks.dev', '-x', 'validate', 'package', 'my_check'],
            capture=True,
        )

        assert result.code == expected_exit_code
        assert expected_output in result.stdout
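_build_pyproject_file is not shown here; one plausible shape, assuming `authors` is a list of e-mail strings and following PEP 621 metadata (hypothetical helper):

def _build_pyproject_file(authors):
    # Render one PEP 621 author entry per e-mail address
    entries = ''.join('    {{ name = "A. Maintainer", email = "{}" }},\n'.format(e) for e in authors)
    return (
        '[project]\n'
        'name = "my_check"\n'
        'dynamic = ["version"]\n'
        'authors = [\n' + entries + ']\n'
    )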
Example 24
def control_sh_activate():
    result = run_command(
        "docker exec dd-ignite /opt/ignite/apache-ignite/bin/control.sh --activate",
        capture=True)
    if result.stderr:
        raise Exception(result.stderr)
Example 25
    def initialize(self):
        run_command(
            ('docker exec ibm_db2 su - db2inst1 -c "db2 -c create db {} using codeset utf-8 territory us"'
             .format(self.db_name)),
            check=True,
        )

        # Enable monitoring
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c update dbm cfg using HEALTH_MON on"',
            check=True)
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c update dbm cfg using DFT_MON_STMT on"',
            check=True)
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c update dbm cfg using DFT_MON_LOCK on"',
            check=True)
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c update dbm cfg using DFT_MON_TABLE on"',
            check=True)
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c update dbm cfg using DFT_MON_BUFPOOL on"',
            check=True)

        # Trigger a backup
        # https://www.ibm.com/support/knowledgecenter/en/SSEPGG_11.1.0/com.ibm.db2.luw.admin.cmd.doc/doc/r0001933.html
        run_command(
            ('docker exec ibm_db2 su - db2inst1 -c '
             '"db2 -c quiesce instance db2inst1 restricted access immediate force connections"'
             ),
            check=True,
        )
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c deactivate db datadog"',
            check=True)
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c backup db datadog"',
            check=True)
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c activate db datadog"',
            check=True)
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c unquiesce instance db2inst1"',
            check=True)
Example 26
    def initialize(self):
        run_command(
            ('docker exec ibm_db2 su - db2inst1 -c '
             '"db2 -c create db {} using codeset utf-8 territory us"'.format(self.db_name)),
            check=True,
        )
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c update dbm cfg using HEALTH_MON on"',
            check=True)
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c update dbm cfg using DFT_MON_STMT on"',
            check=True)
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c update dbm cfg using DFT_MON_LOCK on"',
            check=True)
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c update dbm cfg using DFT_MON_TABLE on"',
            check=True)
        run_command(
            'docker exec ibm_db2 su - db2inst1 -c "db2 -c update dbm cfg using DFT_MON_BUFPOOL on"',
            check=True)
Example 27
    def __call__(self, *args, **kwargs):
        run_command(
            'docker exec ibm_was /opt/IBM/WebSphere/AppServer/profiles/AppSrv01/bin/wsadmin.sh '
            '-lang jython -user wsadmin -password IbmWasPassword1 -f /home/scripts/init.jython'
        )
Example 28
def run_docker_command(command):
    cmd = ['docker', 'exec', CONTAINER_NAME] + command
    return run_command(cmd, capture=True, check=True)
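Typical usage of this wrapper then reads the captured output directly (the command shown is illustrative):

result = run_docker_command(['cat', '/etc/hostname'])
print(result.stdout.strip())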
Example 29
def run_docker_command(command):
    run_command(['docker', 'exec', CONTAINER_NAME] + command, capture=True, check=True)
Example 30
def setup_cert_manager():
    run_command([
        "kubectl",
        "apply",
        "-f",
        "https://raw.githubusercontent.com/open-policy-agent/gatekeeper/release-3.3/deploy/gatekeeper.yaml",
    ])
    run_command([
        "kubectl",
        "wait",
        "deployments",
        "--all",
        "--for=condition=Available",
        "-n",
        "gatekeeper-system",
        "--timeout=300s",
    ])
    run_command([
        "kubectl", "wait", "pods", "-n", "gatekeeper-system", "--all",
        "--for=condition=Ready", "--timeout=300s"
    ])
    config = os.path.join(HERE, 'kubernetes', 'constrainttemplate.yaml')
    run_command(["kubectl", "create", "-f", config])
    run_command([
        "kubectl", "wait", "constrainttemplate", "--all",
        "--for=condition=Ready", "--timeout=300s"
    ])
    config = os.path.join(HERE, 'kubernetes', 'constraintsample.yaml')
    run_command(["kubectl", "create", "-f", config])