Example #1
0
def do_main(args):
    """Execute the launcher sub-command selected in the docopt *args* dict.

    Exactly one of the command keys ('create', 'wait', 'describe',
    'pytest', 'delete') is expected to be truthy.  Returns 0 on success;
    raises dcos_launch.util.LauncherError for user-input problems.
    """
    logger.setup(
        args['--log-level'].upper(),
        noisy_modules=['googleapiclient', 'oauth2client'])

    config_path = args['--config-path']
    if args['create']:
        validated = dcos_launch.config.get_validated_config_from_path(config_path)
        out_path = args['--info-path']
        if os.path.exists(out_path):
            # Refuse to clobber an existing cluster info file.
            raise dcos_launch.util.LauncherError(
                'InputConflict',
                '{} already exists! Delete this or specify a different '
                'cluster info path with the -i option'.format(out_path))
        write_json(out_path, dcos_launch.get_launcher(validated).create())
        return 0

    # Every remaining command operates on a previously created cluster.
    try:
        cluster_info = load_json(args['--info-path'])
    except FileNotFoundError as err:
        raise dcos_launch.util.LauncherError('MissingInfoJSON', None) from err

    launcher = dcos_launch.get_launcher(cluster_info)

    if args['wait']:
        launcher.wait()
        print('Cluster is ready!')
        return 0

    if args['describe']:
        print(json_prettyprint(launcher.describe()))
        return 0

    if args['pytest']:
        env_names = list()
        if args['--env'] is not None:
            if '=' in args['--env']:
                # User is attempting an assignment via the option, which is
                # not supported -- only pass-through of existing variables.
                raise dcos_launch.util.LauncherError(
                    'OptionError',
                    "The '--env' option can only pass through environment variables "
                    "from the current environment. Set variables according to the shell being used."
                )
            env_names = args['--env'].split(',')
            unset = [name for name in env_names if name not in os.environ]
            if unset:
                raise dcos_launch.util.LauncherError(
                    'MissingInput',
                    'Environment variable arguments have been indicated '
                    'but not set: {}'.format(repr(unset)))
        env = {name: os.environ[name] for name in env_names}
        return launcher.test(args['<pytest_extras>'], env)

    if args['delete']:
        launcher.delete()
        return 0
Example #2
0
def check_success(capsys, tmpdir, config_path):
    """Exercise a launcher end-to-end, first via the API then via the CLI.

    Runs create/wait/describe/test/delete directly on a launcher object,
    then repeats the same flow through the command-line entry point,
    ensuring each step passes.  Returns the parsed cluster info JSON and
    the parsed ``describe`` stdout JSON for more specific checks.
    """
    # Direct (library) pass.
    validated = dcos_launch.config.get_validated_config_from_path(config_path)
    created_launcher = dcos_launch.get_launcher(validated)
    info = created_launcher.create()
    # Re-construct the launcher from create()'s output, as a consumer would.
    launcher = dcos_launch.get_launcher(info)
    launcher.wait()
    launcher.describe()
    launcher.test([], {})
    launcher.delete()

    # Deliberately non-default file name to prove -i/--info-path is honored.
    info_path = str(tmpdir.join('my_specific_info.json'))

    # CLI pass.
    check_cli([
        'create',
        '--config-path={}'.format(config_path),
        '--info-path={}'.format(info_path),
    ])
    # The info written to disk must be JSON-parsable.
    with open(info_path) as info_file:
        info = json.load(info_file)

    check_cli(['wait', '--info-path={}'.format(info_path)])

    capsys.readouterr()  # discard anything captured on stdout so far
    check_cli(['describe', '--info-path={}'.format(info_path)])
    # describe must print parse-able JSON on stdout.
    description = json.loads(capsys.readouterr()[0])

    # General shape assertions about the description.
    assert 'masters' in description
    assert 'private_agents' in description
    assert 'public_agents' in description

    check_cli(['pytest', '--info-path={}'.format(info_path)])

    check_cli(['delete', '--info-path={}'.format(info_path)])

    return info, description
Example #3
0
def test_key_helper(aws_cf_with_helper_config_path):
    """key_helper() must create a key pair named after the deployment and
    wire it into the config (template parameter and private key)."""
    config = dcos_launch.config.get_validated_config_from_path(
        aws_cf_with_helper_config_path)
    launcher = dcos_launch.get_launcher(config)
    created = launcher.key_helper()
    deployment_name = config['deployment_name']
    assert created['key_name'] == deployment_name
    assert config['template_parameters']['KeyName'] == deployment_name
    assert config['ssh_private_key'] == dcos_launch.util.MOCK_SSH_KEY_DATA
def onprem_launcher():
    """Provide the OnpremLauncher used to create a cluster for installation.

    Reads the launch config from $TEST_LAUNCH_CONFIG_PATH and skips the
    calling test unless it declares provider 'onprem' on platform 'aws'.
    """
    validated = config.get_validated_config(
        os.environ['TEST_LAUNCH_CONFIG_PATH'])
    launcher = dcos_launch.get_launcher(validated)
    if launcher.config['provider'] != 'onprem':
        pytest.skip('Must use a launch config with `provider: onprem` to run this test')
    if launcher.config['platform'] != 'aws':
        pytest.skip('Must use a launch config with `platform: aws` to run this test')
    return launcher
Example #5
0
def test_missing_aws_stack(aws_cf_config_path, monkeypatch):
    """Every launcher action must raise a clean StackNotFound error when
    the CloudFormation stack cannot be fetched."""
    monkeypatch.setattr(
        dcos_test_utils.aws, 'fetch_stack', mock_stack_not_found)
    config = dcos_launch.config.get_validated_config(aws_cf_config_path)
    aws_launcher = dcos_launch.get_launcher(config)

    def assert_stack_not_found(method_name, call_args):
        # Each method must fail with LauncherError('StackNotFound'), not
        # some uncaught provider exception.
        with pytest.raises(dcos_launch.util.LauncherError) as caught:
            getattr(aws_launcher, method_name)(*call_args)
        assert caught.value.error == 'StackNotFound'

    info = aws_launcher.create()
    # Rebind to a launcher built from the create() output; the closure above
    # picks up the rebound name.
    aws_launcher = dcos_launch.get_launcher(info)
    for method_name, call_args in (
            ('wait', ()),
            ('describe', ()),
            ('delete', ()),
            ('test', ([], {}))):
        assert_stack_not_found(method_name, call_args)
Example #6
0
def test_fault_domain_helper(check_cli_success, gcp_onprem_with_fd_helper_config_path, monkeypatch, tmpdir):
    """End-to-end check of the generated fault-domain-detect script.

    Mocks the cluster topology and the SSH hostname lookups, runs the
    script produced by ``_fault_domain_helper`` once per host, and asserts
    that the region/zone assignments match the ``fault_domain_helper``
    section of the launch config.
    """
    config = dcos_launch.config.get_validated_config_from_path(gcp_onprem_with_fd_helper_config_path)

    # set the onprem cluster to return the correct number of mocked nodes; these names are throwaway
    mock_private_agent_ips = list((helpers.Host('foo', 'bar') for _ in range(config['num_private_agents'])))
    mock_public_agent_ips = list((helpers.Host('foo', 'bar') for _ in range(config['num_public_agents'])))
    mock_master_ips = list((helpers.Host('foo', 'bar') for _ in range(config['num_masters'])))
    monkeypatch.setattr(
        dcos_test_utils.onprem.OnpremCluster,
        'get_private_agent_ips',
        lambda *args, **kwargs: mock_private_agent_ips)
    monkeypatch.setattr(
        dcos_test_utils.onprem.OnpremCluster,
        'get_public_agent_ips',
        lambda *args, **kwargs: mock_public_agent_ips)
    monkeypatch.setattr(
        dcos_test_utils.onprem.OnpremCluster,
        'get_master_ips',
        lambda *args, **kwargs: mock_master_ips)
    # now mock the hostnames that will be returned by the SSH command
    total_nodes = config['num_private_agents'] + config['num_public_agents'] + config['num_masters']
    # tail with '\n' to mimic reality
    hostname_stack = list((s.encode() for s in ('host-' + str(i) + '\n' for i in range(total_nodes))))
    # Keep a stable copy for iteration: each mocked SshClient.command call
    # below pops one hostname off hostname_stack.
    hostname_list = list(hostname_stack)
    monkeypatch.setattr(
        dcos_test_utils.ssh_client.SshClient,
        'command',
        lambda *args, **kwargs: hostname_stack.pop())
    launcher = dcos_launch.get_launcher(config)
    fd_script = launcher._fault_domain_helper()
    # region name -> list of zone names observed across all hosts
    results = collections.defaultdict(list)
    with tmpdir.as_cwd():
        for host in hostname_list:
            script_path = tmpdir.join('fault-domain-detect.sh')
            script_path.write(fd_script)
            subprocess.check_call([
                # strip \n here as this is hacking the processed script
                'sed', '-i',
                's/hostname=$(hostname)/hostname={}/g'.format(host.decode().strip('\n')),
                str(script_path)])
            fd_out = subprocess.check_output(['bash', str(script_path)])
            fd_json = json.loads(fd_out.decode())
            assert 'region' in fd_json['fault_domain']
            assert 'zone' in fd_json['fault_domain']
            results[fd_json['fault_domain']['region']['name']].append(fd_json['fault_domain']['zone']['name'])
    for region, info in config['fault_domain_helper'].items():
        # assert there are the correct number of assignments per region
        if info['local']:
            # the region flagged 'local' also hosts all of the masters
            assert len(results[region]) == \
                info['num_private_agents'] + info['num_public_agents'] + config['num_masters']
        else:
            assert len(results[region]) == info['num_private_agents'] + info['num_public_agents']
        # assert there are the correct number of zones in the region
        assert set([region + '-' + str(i) for i in range(1, info['num_zones'] + 1)]) == set(results[region])
Example #7
0
def test_zen_helper(aws_zen_cf_config_path):
    """zen_helper() must create the network resources and wire their IDs
    into the CloudFormation template parameters."""
    config = dcos_launch.config.get_validated_config(aws_zen_cf_config_path)
    launcher = dcos_launch.get_launcher(config)
    resources = launcher.zen_helper()

    # The returned temp resources carry the mocked provider IDs.
    assert resources['vpc'] == dcos_launch.util.MOCK_VPC_ID
    assert resources['gateway'] == dcos_launch.util.MOCK_GATEWAY_ID
    assert resources['private_subnet'] == dcos_launch.util.MOCK_SUBNET_ID
    assert resources['public_subnet'] == dcos_launch.util.MOCK_SUBNET_ID

    # The config's template parameters were updated in place.
    params = config['template_parameters']
    assert params['Vpc'] == dcos_launch.util.MOCK_VPC_ID
    assert params['InternetGateway'] == dcos_launch.util.MOCK_GATEWAY_ID
    assert params['PrivateSubnet'] == dcos_launch.util.MOCK_SUBNET_ID
    assert params['PublicSubnet'] == dcos_launch.util.MOCK_SUBNET_ID
def mount_volumes():
    """Create two 200 MB loopback-backed partitions (/dcos/volume0 and
    /dcos/volume1) on every private agent of a dcos-launch cluster.

    Reads the cluster info JSON from $CLUSTER_INFO_PATH (default
    'cluster_info.json'), then tunnels through the first master to copy
    and run a volume-creation script on each private agent.

    Raises:
        Exception: if the cluster info file does not exist.
    """
    script = """
#!/bin/bash
sudo systemctl stop dcos-mesos-slave.service
sudo rm -f /var/lib/dcos/mesos-resources
sudo rm -f /var/lib/mesos/slave/meta/slaves/latest
"""
    for i in range(2):
        script += """
sudo mkdir -p /dcos/volume{idx}
sudo dd if=/dev/zero of=/root/volume{idx}.img bs=1M count={size}
sudo losetup /dev/loop{idx} /root/volume{idx}.img
sudo mkfs -t ext4 /dev/loop{idx}
sudo losetup -d /dev/loop{idx}
echo "/root/volume{idx}.img /dcos/volume{idx} auto loop 0 2" | sudo tee -a /etc/fstab
sudo mount /dcos/volume{idx}
""".format(idx=i, size=200)

    script += """
sudo systemctl restart dcos-mesos-slave.service
"""

    cluster_info_path = os.getenv('CLUSTER_INFO_PATH', 'cluster_info.json')
    if not os.path.exists(cluster_info_path):
        raise Exception('No cluster info to work with!')
    # Context manager avoids leaking the file handle
    # (previously json.load(open(...)) never closed the file).
    with open(cluster_info_path) as f:
        cluster_info_json = json.load(f)
    launcher = dcos_launch.get_launcher(cluster_info_json)
    description = launcher.describe()
    ssh = launcher.get_ssh_client()
    with ssh.tunnel(description['masters'][0]['public_ip']) as t:
        # Stage the SSH key and the script on the master, which relays
        # them to each private agent.
        t.copy_file(helpers.session_tempfile(ssh.key), 'ssh_key')
        t.copy_file(helpers.session_tempfile(script), 'volume_script.sh')
        t.command(['chmod', '600', 'ssh_key'])
        ssh_command = ['ssh', '-i', 'ssh_key'] + ssh_client.SHARED_SSH_OPTS
        scp_command = ['scp', '-i', 'ssh_key'] + ssh_client.SHARED_SSH_OPTS
        for private_agent in description['private_agents']:
            target = '{}@{}'.format(ssh.user, private_agent['private_ip'])
            t.command(scp_command +
                      ['volume_script.sh', target + ':~/volume_script.sh'])
            t.command(ssh_command + [target, 'bash', 'volume_script.sh'])
        # nasty hack until we add a better post-flight
        time.sleep(60)
def mount_volumes():
    """Create two 200 MB loopback-backed partitions on the private agents
    of a dcos-launch cluster (mounted at /dcos/volume0 and /dcos/volume1).

    Cluster info is read from $CLUSTER_INFO_PATH (default
    'cluster_info.json'); the volume script is executed on each private
    agent via an SSH tunnel through the first master.

    Raises:
        Exception: if the cluster info file does not exist.
    """
    script = """
#!/bin/bash
sudo systemctl stop dcos-mesos-slave.service
sudo rm -f /var/lib/dcos/mesos-resources
sudo rm -f /var/lib/mesos/slave/meta/slaves/latest
"""
    for i in range(2):
        script += """
sudo mkdir -p /dcos/volume{idx}
sudo dd if=/dev/zero of=/root/volume{idx}.img bs=1M count={size}
sudo losetup /dev/loop{idx} /root/volume{idx}.img
sudo mkfs -t ext4 /dev/loop{idx}
sudo losetup -d /dev/loop{idx}
echo "/root/volume{idx}.img /dcos/volume{idx} auto loop 0 2" | sudo tee -a /etc/fstab
sudo mount /dcos/volume{idx}
""".format(idx=i, size=200)

    script += """
sudo systemctl restart dcos-mesos-slave.service
"""

    cluster_info_path = os.getenv('CLUSTER_INFO_PATH', 'cluster_info.json')
    if not os.path.exists(cluster_info_path):
        raise Exception('No cluster info to work with!')
    # Context manager avoids leaking the file handle
    # (previously json.load(open(...)) never closed the file).
    with open(cluster_info_path) as f:
        cluster_info_json = json.load(f)
    launcher = dcos_launch.get_launcher(cluster_info_json)
    description = launcher.describe()
    ssh = launcher.get_ssh_client()
    with ssh.tunnel(description['masters'][0]['public_ip']) as t:
        # Stage the SSH key and the script on the master, which relays
        # them to each private agent.
        t.copy_file(helpers.session_tempfile(ssh.key), 'ssh_key')
        t.copy_file(helpers.session_tempfile(script), 'volume_script.sh')
        t.command(['chmod', '600', 'ssh_key'])
        ssh_command = ['ssh', '-i', 'ssh_key'] + ssh_client.SHARED_SSH_OPTS
        scp_command = ['scp', '-i', 'ssh_key'] + ssh_client.SHARED_SSH_OPTS
        for private_agent in description['private_agents']:
            target = '{}@{}'.format(ssh.user, private_agent['private_ip'])
            t.command(scp_command + ['volume_script.sh', target + ':~/volume_script.sh'])
            t.command(ssh_command + [target, 'bash', 'volume_script.sh'])
        # nasty hack until we add a better post-flight
        time.sleep(60)
def mount_volumes():
    """Create MOUNT_VOLUME_COUNT loopback-backed mount volumes on every
    private agent of a dcos-launch cluster.

    Each volume is MOUNT_VOLUME_SIZE_MB megabytes, backed by a file under
    /root and mounted at /dcos/volume<i>.  The generated script is
    idempotent: it exits early when every mount point already exists.
    Cluster info is read from $CLUSTER_INFO_PATH (default
    "cluster_info.json").

    Raises:
        Exception: if the cluster info file does not exist.
    """
    volume_script = """#!/bin/bash
set -e

if [ {dcos_mounts} ]; then
    echo 'Volumes already exist, exiting early'
    exit 0
fi

echo 'Stopping agent and clearing state...'

systemctl stop dcos-mesos-slave.service

cat /var/lib/dcos/mesos-resources || echo 'No resources file found'
ls -l /var/lib/mesos/slave/meta/slaves/latest || echo 'No latest agent symlink found'
rm -f /var/lib/dcos/mesos-resources
rm -f /var/lib/mesos/slave/meta/slaves/latest

losetup -a
""".format(dcos_mounts=" -a ".join(
        ["-e /dcos/volume{}".format(i) for i in range(MOUNT_VOLUME_COUNT)]))

    for i in range(MOUNT_VOLUME_COUNT):
        volume_script += """
if [ ! -e {loop_file} ]; then
    echo 'Creating loopback device {loop_dev}...'

    dd if=/dev/zero of={loop_file} bs=1M count={size_mb}
    losetup {loop_dev} {loop_file}
    mkfs -t ext4 {loop_dev}
    losetup -d {loop_dev}
fi

if [ ! -e {dcos_mount} ]; then
    echo 'Creating loopback volume {dcos_mount}...'

    mkdir -p {dcos_mount}
    echo \"{loop_file} {dcos_mount} auto loop 0 2\" | tee -a /etc/fstab
    mount {dcos_mount}
fi
""".format(
            size_mb=MOUNT_VOLUME_SIZE_MB,
            dcos_mount="/dcos/volume{}".format(i),
            loop_dev="/dev/loop{}".format(i),
            loop_file="/root/volume{}.img".format(i),
        )

    volume_script += """
echo 'Restarting agent...'
systemctl restart dcos-mesos-slave.service"""

    cluster_info_path = os.getenv("CLUSTER_INFO_PATH", "cluster_info.json")
    if not os.path.exists(cluster_info_path):
        raise Exception("No cluster info to work with!")
    # Context manager avoids leaking the file handle
    # (previously json.load(open(...)) never closed the file).
    with open(cluster_info_path) as f:
        cluster_info_json = json.load(f)
    launcher = dcos_launch.get_launcher(cluster_info_json)
    description = launcher.describe()
    ssh = launcher.get_ssh_client()
    with ssh.tunnel(description["masters"][0]["public_ip"]) as t:
        # Stage the SSH key and the script on the master, which relays
        # them to each private agent.
        t.copy_file(helpers.session_tempfile(ssh.key), "ssh_key")
        t.copy_file(helpers.session_tempfile(volume_script),
                    "volume_script.sh")
        t.command(["chmod", "600", "ssh_key"])
        ssh_command = ["ssh", "-i", "ssh_key"] + ssh_client.SHARED_SSH_OPTS
        scp_command = ["scp", "-i", "ssh_key"] + ssh_client.SHARED_SSH_OPTS
        for private_agent in description["private_agents"]:
            target = "{}@{}".format(ssh.user, private_agent["private_ip"])
            t.command(scp_command +
                      ["volume_script.sh", target + ":~/volume_script.sh"])
            t.command(ssh_command +
                      [target, "sudo", "bash", "volume_script.sh"])
        # nasty hack until we add a better post-flight
        time.sleep(60)
Example #11
0
        'exhibitor_storage_backend':
        'static',
        'superuser_username':
        os.environ['DCOS_TEST_ADMIN_USERNAME'],
        'superuser_password_hash':
        sha512_crypt.hash(os.environ['DCOS_TEST_ADMIN_PASSWORD']),
        'fault_domain_enabled':
        False,
        'license_key_contents':
        os.environ['DCOS_TEST_LICENSE'],
    },
}

# Validate the hand-built launch config dict; '/tmp' is passed as the second
# argument — presumably the base directory for resolving relative paths in
# the config (verify against config.get_validated_config).
dcos_launch_config = config.get_validated_config(dcos_launch_config, '/tmp')

launcher = get_launcher(dcos_launch_config)

# Provision the cluster; create() returns the cluster info dict needed to
# re-construct a launcher afterwards.
cluster_info = launcher.create()

# Rebuild the launcher from the cluster info, then wait for the deployment.
launcher = get_launcher(cluster_info)
launcher.wait()

# Workaround for `launcher.install_dcos()` printing to stdout.
real_stdout = sys.stdout
sys.stdout = open(os.devnull, "w")
try:
    launcher.install_dcos()
finally:
    # Always close the devnull handle and restore the real stdout,
    # even if the install raises.
    sys.stdout.close()
    sys.stdout = real_stdout
Example #12
0
def mount_volumes():
    """Create loopback-backed (profile) mount volumes on every private
    agent of a dcos-launch cluster.

    One volume is created per entry of MOUNT_VOLUME_PROFILES, each
    MOUNT_VOLUME_SIZE_MB megabytes, mounted at /dcos/volume<i>; a truthy
    profile entry selects the filesystem type (ext4 otherwise) and is also
    written into the agent's Mesos disk resources as
    ``disk.source.profile``.  The generated script is idempotent: it exits
    early when every mount point already exists.  Cluster info is read
    from $CLUSTER_INFO_PATH (default "cluster_info.json").

    Raises:
        Exception: if the cluster info file does not exist.
    """
    volume_script = """#!/bin/bash
set -e

if [ {dcos_mounts} ]; then
    echo 'Volumes already exist, exiting early'
    exit 0
fi

echo 'Stopping agent and clearing state...'

systemctl stop dcos-mesos-slave.service

cat /var/lib/dcos/mesos-resources || echo 'No resources file found'
ls -l /var/lib/mesos/slave/meta/slaves/latest || echo 'No latest agent symlink found'
rm -f /var/lib/dcos/mesos-resources
rm -f /var/lib/mesos/slave/meta/slaves/latest

losetup -a
""".format(dcos_mounts=" -a ".join([
        "-e /dcos/volume{}".format(i)
        for i, _ in enumerate(MOUNT_VOLUME_PROFILES)
    ]))

    for i, p in enumerate(MOUNT_VOLUME_PROFILES):
        volume_script += """
if [ ! -e {loop_file} ]; then
    echo 'Creating loopback device {loop_dev}...'

    dd if=/dev/zero of={loop_file} bs=1M count={size_mb}
    losetup {loop_dev} {loop_file}
    mkfs -t {fs_type} {loop_dev}
    losetup -d {loop_dev}
fi

if [ ! -e {dcos_mount} ]; then
    echo 'Creating loopback volume {dcos_mount}...'

    mkdir -p {dcos_mount}
    echo \"{loop_file} {dcos_mount} auto loop 0 2\" | tee -a /etc/fstab
    mount {dcos_mount}
fi
""".format(size_mb=MOUNT_VOLUME_SIZE_MB,
           dcos_mount="/dcos/volume{}".format(i),
           loop_dev="/dev/loop{}".format(i),
           loop_file="/root/volume{}.img".format(i),
           fs_type=p or "ext4")

    # To create profile mount volumes, we manually run `make_disk_resources.py`
    # to generate disk resources, then parse the result and set the
    # `disk.source.profile` field for each profile mount volume.
    volume_script += """
echo 'Updating disk resources...'

export MESOS_WORK_DIR MESOS_RESOURCES
eval $(sed -E "s/^([A-Z_]+)=(.*)$/\\1='\\2'/" /opt/mesosphere/etc/mesos-slave-common)  # Set up `MESOS_WORK_DIR`.
eval $(sed -E "s/^([A-Z_]+)=(.*)$/\\1='\\2'/" /opt/mesosphere/etc/mesos-slave)         # Set up `MESOS_RESOURCES`.
source /opt/mesosphere/etc/mesos-slave-common
/opt/mesosphere/bin/make_disk_resources.py /var/lib/dcos/mesos-resources
source /var/lib/dcos/mesos-resources
/opt/mesosphere/bin/python -c "
import json;
import os;

profiles = {profiles}
resources = json.loads(os.environ['MESOS_RESOURCES'])

for r in resources:
    try:
        disk_source = r['disk']['source']
        disk_source['profile'] = profiles[disk_source['mount']['root']]
    except KeyError:
        pass

print('MESOS_RESOURCES=\\'' + json.dumps(resources) + '\\'')
" > /var/lib/dcos/mesos-resources

echo 'Restarting agent...'

systemctl restart dcos-mesos-slave.service
""".format(
        profiles={
            "/dcos/volume{}".format(i): p
            for i, p in enumerate(MOUNT_VOLUME_PROFILES) if p
        })

    cluster_info_path = os.getenv("CLUSTER_INFO_PATH", "cluster_info.json")
    if not os.path.exists(cluster_info_path):
        raise Exception("No cluster info to work with!")
    # Context manager avoids leaking the file handle
    # (previously json.load(open(...)) never closed the file).
    with open(cluster_info_path) as f:
        cluster_info_json = json.load(f)
    launcher = dcos_launch.get_launcher(cluster_info_json)
    description = launcher.describe()
    ssh = launcher.get_ssh_client()
    with ssh.tunnel(description["masters"][0]["public_ip"]) as t:
        # Stage the SSH key and the script on the master, which relays
        # them to each private agent.
        t.copy_file(helpers.session_tempfile(ssh.key), "ssh_key")
        t.copy_file(helpers.session_tempfile(volume_script),
                    "volume_script.sh")
        t.command(["chmod", "600", "ssh_key"])
        ssh_command = ["ssh", "-i", "ssh_key"] + ssh_client.SHARED_SSH_OPTS
        scp_command = ["scp", "-i", "ssh_key"] + ssh_client.SHARED_SSH_OPTS
        for private_agent in description["private_agents"]:
            target = "{}@{}".format(ssh.user, private_agent["private_ip"])
            t.command(scp_command +
                      ["volume_script.sh", target + ":~/volume_script.sh"])
            t.command(ssh_command +
                      [target, "sudo", "bash", "volume_script.sh"])
        # nasty hack until we add a better post-flight
        time.sleep(60)