Example No. 1
def test_action_deploy_post(client, monkeypatch):
    route = '/api/v1/action/deploy'

    write_string('genconf/config.yaml', ssh_config_yaml)
    monkeypatch.setattr(dcos_installer.action_lib, '_get_bootstrap_tarball', lambda: '123')
    monkeypatch.setattr(dcos_installer.action_lib, '_add_copy_packages', lambda _: None)

    # Deploy should already have been executed for action 'deploy'
    def mocked_json_state(arg):
        return {
            'hosts': {
                '127.0.0.1': {
                    'host_status': 'success'
                },
                '127.0.0.2': {
                    'host_status': 'success'
                }
            }
        }
    mock_json_state(monkeypatch, mocked_json_state)
    res = client.request(route, method='POST')
    assert res.json == {'status': 'deploy was already executed, skipping'}

    # Test start deploy
    mock_json_state(monkeypatch, lambda arg: False)
    res = client.request(route, method='POST')
    assert res.json == {'status': 'deploy started'}
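
Every example in this listing exercises write_string. For orientation, here is a minimal sketch of the behavior the snippets assume (write the given text to a path, creating or truncating the file); it is an illustration, not necessarily the exact implementation in pkgpanda.util:

def write_string(filename, data):
    # Create the file if needed, truncate any previous contents, write the text.
    with open(filename, "w") as f:
        f.write(data)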
Example No. 2
def do_bundle_onprem(extra_files, gen_out, output_dir):
    # We are only being called via dcos_generate_config.sh with an output_dir
    assert output_dir is not None
    assert output_dir
    assert output_dir[-1] != '/'
    output_dir = output_dir + '/'

    # Copy the extra_files
    for filename in extra_files:
        shutil.copy(filename, output_dir + filename)

    # Copy the cluster packages
    for name, info in gen_out.cluster_packages.items():
        copy_makedirs(info['filename'], output_dir + info['filename'])

    # Write an index of the cluster packages
    write_json(output_dir + 'cluster-package-info.json', gen_out.cluster_packages)

    # Write the bootstrap id
    write_string(output_dir + 'bootstrap.latest', gen_out.arguments['bootstrap_id'])

    # Make a package fetch script
    package_fetches = "\n".join(
        fetch_pkg_template.format(
            package_path='packages/{name}/{id}.tar.xz'.format(name=pkgpanda.PackageId(package).name, id=package),
            bootstrap_url='https://downloads.dcos.io/dcos/stable'
            ) for package in load_json("/artifacts/{}.active.json".format(gen_out.arguments['bootstrap_id'])))
    write_string(output_dir + 'fetch_packages.sh', fetch_all_pkgs.format(package_fetches=package_fetches))
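
fetch_pkg_template and fetch_all_pkgs are module-level templates not shown in this snippet. A hypothetical sketch of what such templates could look like (curl-based, with package_path and bootstrap_url filled in per package); the real templates may differ:

fetch_all_pkgs = """#!/bin/bash
set -o errexit -o nounset -o pipefail
# One fetch command per package is substituted below.
{package_fetches}
"""

fetch_pkg_template = """mkdir -p $(dirname {package_path})
curl -fsSL -o {package_path} {bootstrap_url}/{package_path}
"""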
Example No. 3
def generate_node_upgrade_script(gen_out, installed_cluster_version, serve_dir=SERVE_DIR):

    # installed_cluster_version: Current installed version on the cluster
    # installer_version: Version we are upgrading to

    bootstrap_url = gen_out.arguments['bootstrap_url']

    installer_version = gen.calc.entry['must']['dcos_version']

    package_list = ' '.join(package['id'] for package in gen_out.cluster_packages.values())

    bash_script = gen.template.parse_str(node_upgrade_template).render({
        'dcos_image_commit': util.dcos_image_commit,
        'generation_date': util.template_generation_date,
        'bootstrap_url': bootstrap_url,
        'cluster_packages': package_list,
        'installed_cluster_version': installed_cluster_version,
        'installer_version': installer_version})

    upgrade_script_path = '/upgrade/' + uuid.uuid4().hex

    subprocess.check_call(['mkdir', '-p', serve_dir + upgrade_script_path])

    write_string(serve_dir + upgrade_script_path + '/dcos_node_upgrade.sh', bash_script)

    print("Node upgrade script URL: " + bootstrap_url + upgrade_script_path + '/dcos_node_upgrade.sh')

    return 0
Example No. 4
File: actions.py Project: dcos/dcos
def setup(install, repository):
    """Set up a fresh install of DC/OS.

    install: pkgpanda.Install
    repository: pkgpanda.Repository

    """
    # Check for /opt/mesosphere/bootstrap. If it exists, download everything
    # and install /etc/systemd/system/multi-user.target/dcos.target
    bootstrap_path = os.path.join(install.root, "bootstrap")
    if os.path.exists(bootstrap_path):
        # Write, enable /etc/systemd/system/dcos.target for next boot.
        dcos_target_dir = os.path.dirname(install.systemd_dir)
        try:
            os.makedirs(dcos_target_dir)
        except FileExistsError:
            pass

        write_string(os.path.join(dcos_target_dir, "dcos.target"),
                     DCOS_TARGET_CONTENTS)
        _do_bootstrap(install, repository)
        # Enable dcos.target only after we have populated it to prevent starting
        # up stuff inside of it before we activate the new set of packages.
        if install.manage_systemd:
            _start_dcos_target(block_systemd=True)
        os.remove(bootstrap_path)

    # Check for /opt/mesosphere/install_progress. If found, recover the partial
    # update.
    if os.path.exists(install.root + "/install_progress"):
        took_action, msg = install.recover_swap_active()
        if not took_action:
            print("No recovery performed: {}".format(msg))
Example No. 5
    def mark_latest():
        # Ensure latest is always written
        write_string(latest_name, bootstrap_id)

        print("bootstrap: {}".format(bootstrap_name))
        print("active: {}".format(active_name))
        print("latest: {}".format(latest_name))
        return bootstrap_id
Example No. 6
def main():
    options = check_environment()
    bw = test_util.aws.BotoWrapper(
        region=options.aws_region,
        aws_access_key_id=options.aws_access_key_id,
        aws_secret_access_key=options.aws_secret_access_key)
    stack_name = 'dcos-ci-test-cf-{}'.format(random_id(10))
    ssh_key = bw.create_key_pair(stack_name)
    write_string('ssh_key', ssh_key)
    log.info('Spinning up AWS CloudFormation with ID: {}'.format(stack_name))
    if options.advanced:
        cf, ssh_info = test_util.aws.DcosZenCfStack.create(
            stack_name=stack_name,
            boto_wrapper=bw,
            template_url=options.template_url,
            private_agents=options.agents,
            public_agents=options.public_agents,
            key_pair_name=stack_name,
            private_agent_type='m3.xlarge',
            public_agent_type='m3.xlarge',
            master_type='m3.xlarge',
            vpc=options.vpc,
            gateway=options.gateway,
            private_subnet=options.private_subnet,
            public_subnet=options.public_subnet)
    else:
        cf, ssh_info = test_util.aws.DcosCfStack.create(
            stack_name=stack_name,
            template_url=options.template_url,
            private_agents=options.agents,
            public_agents=options.public_agents,
            admin_location='0.0.0.0/0',
            key_pair_name=stack_name,
            boto_wrapper=bw)
    time.sleep(300)  # we know the cluster is not ready yet, don't poll to avoid hitting the rate limit
    cf.wait_for_complete()
    # Resiliency testing requires knowing the stack name
    options.test_cmd = 'AWS_STACK_NAME=' + stack_name + ' ' + options.test_cmd

    # hidden hook where user can supply an ssh_key for a preexisting cluster
    cluster = test_util.cluster.Cluster.from_cloudformation(cf, ssh_info, ssh_key)

    result = test_util.cluster.run_integration_tests(
        cluster,
        region=options.aws_region,
        aws_access_key_id=options.aws_access_key_id,
        aws_secret_access_key=options.aws_secret_access_key,
        test_cmd=options.test_cmd,
    )
    if result == 0:
        log.info('Test successful! Deleting CloudFormation.')
        cf.delete()
        bw.delete_key_pair(stack_name)
    else:
        logging.warning('Test exited with an error')
    if options.ci_flags:
        result = 0  # Wipe the return code so that tests can be muted in CI
    sys.exit(result)
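
random_id is a small helper imported from elsewhere in the test utilities. A hypothetical stand-in that produces an n-character lowercase alphanumeric suffix for uniquely naming the CloudFormation stack:

import random
import string

def random_id(n):
    # n random lowercase letters/digits, e.g. for stack-name suffixes.
    return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(n))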
Example No. 7
def integration_test(
        tunnel, test_dir,
        dcos_dns, master_list, agent_list, public_agent_list,
        test_dns_search, provider, ci_flags,
        aws_access_key_id='', aws_secret_access_key='', region='', add_env=None):
    """Runs integration test on host

    Args:
        test_dir: directory in which to leave test_wrapper.sh
        dcos_dns: string representing IP of DCOS DNS host
        master_list: list of master addresses
        agent_list: list of agent addresses
        test_dns_search: if set to True, test for deployed mesos DNS app
        ci_flags: optional additional string to be passed to test
        provider: (str) either onprem, aws, or azure
        # The following variables correspond to currently disabled tests
        aws_access_key_id: needed for REXRAY tests
        aws_secret_access_key: needed for REXRAY tests
        region: string indicating AWS region in which cluster is running
        add_env: a python dict with any number of key=value assignments to be passed to
            the test environment
    """
    dns_search = 'true' if test_dns_search else 'false'
    test_env = [
        'DCOS_DNS_ADDRESS=http://'+dcos_dns,
        'MASTER_HOSTS='+','.join(master_list),
        'PUBLIC_MASTER_HOSTS='+','.join(master_list),
        'SLAVE_HOSTS='+','.join(agent_list),
        'PUBLIC_SLAVE_HOSTS='+','.join(public_agent_list),
        'DCOS_PROVIDER='+provider,
        'DNS_SEARCH='+dns_search,
        'AWS_ACCESS_KEY_ID='+aws_access_key_id,
        'AWS_SECRET_ACCESS_KEY='+aws_secret_access_key,
        'AWS_REGION='+region]
    if add_env:
        for key, value in add_env.items():
            extra_env = key + '=' + value
            test_env.append(extra_env)

    test_wrapper = """#!/bin/bash
source /opt/mesosphere/environment.export
{env}
cd /opt/mesosphere/active/dcos-integration-test
py.test -vv {ci_flags}
"""
    test_env_str = ''.join(['export '+e+'\n' for e in test_env])
    write_string('test_wrapper.sh', test_wrapper.format(env=test_env_str, ci_flags=ci_flags))

    wrapper_path = join(test_dir, 'test_wrapper.sh')
    log.info('Running integration test...')
    tunnel.write_to_remote('test_wrapper.sh', wrapper_path)
    try:
        tunnel.remote_cmd(['bash', wrapper_path], stdout=sys.stdout.buffer)
    except CalledProcessError as e:
        return e.returncode
    return 0
Example No. 8
def temp_data(key):
    temp_dir = tempfile.mkdtemp()
    socket_path = temp_dir + '/control_socket'
    key_path = temp_dir + '/key'
    write_string(key_path, key)
    os.chmod(key_path, stat.S_IREAD | stat.S_IWRITE)
    yield (socket_path, key_path)
    os.remove(key_path)
    # might have been deleted already if SSH exited correctly
    if os.path.exists(socket_path):
        os.remove(socket_path)
    os.rmdir(temp_dir)
Example No. 9
def mock_do_build_packages(cache_repository_url, tree_variants):
    make_directory('packages/cache/bootstrap')
    write_string("packages/cache/bootstrap/bootstrap_id.bootstrap.tar.xz", "bootstrap_contents")
    write_json("packages/cache/bootstrap/bootstrap_id.active.json", ['a--b', 'c--d'])
    write_string("packages/cache/bootstrap/bootstrap.latest", "bootstrap_id")
    write_string("packages/cache/bootstrap/installer.bootstrap.latest", "installer_bootstrap_id")
    write_json("packages/cache/bootstrap/installer_bootstrap_id.active.json", ['c--d', 'e--f'])
    write_string("packages/cache/bootstrap/downstream.installer.bootstrap.latest", "downstream_installer_bootstrap_id")
    write_json("packages/cache/bootstrap/downstream_installer_bootstrap_id.active.json", [])

    make_directory('packages/cache/complete')
    write_json(
        "packages/cache/complete/complete.latest.json",
        {'bootstrap': 'bootstrap_id', 'packages': ['a--b', 'c--d']})
    write_json(
        "packages/cache/complete/installer.complete.latest.json",
        {'bootstrap': 'installer_bootstrap_id', 'packages': ['c--d', 'e--f']})
    write_json(
        "packages/cache/complete/downstream.installer.complete.latest.json",
        {'bootstrap': 'installer_bootstrap_id', 'packages': []})

    return {
        None: {"bootstrap": "bootstrap_id", "packages": ["a--b", "c--d"]},
        "installer": {"bootstrap": "installer_bootstrap_id", "packages": ["c--d", "e--f"]},
        "downstream.installer": {"bootstrap": "downstream_installer_bootstrap_id", "packages": []}
    }
Example No. 10
def mock_do_build_packages(cache_repository_url):
    subprocess.check_call(['mkdir', '-p', 'packages/cache/bootstrap'])
    write_string("packages/cache/bootstrap/bootstrap_id.bootstrap.tar.xz", "bootstrap_contents")
    write_json("packages/cache/bootstrap/bootstrap_id.active.json", ['a--b', 'c--d'])
    write_string("packages/cache/bootstrap/bootstrap.latest", "bootstrap_id")
    write_string("packages/cache/bootstrap/installer.bootstrap.latest", "installer_bootstrap_id")
    write_json("packages/cache/bootstrap/installer_bootstrap_id.active.json", ['c--d', 'e--f'])
    write_string("packages/cache/bootstrap/ee.installer.bootstrap.latest", "ee_installer_bootstrap_id")
    write_json("packages/cache/bootstrap/ee_installer_bootstrap_id.active.json", [])

    subprocess.check_call(['mkdir', '-p', 'packages/cache/complete'])
    write_json(
        "packages/cache/complete/complete.latest.json",
        {'bootstrap': 'bootstrap_id', 'packages': ['a--b', 'c--d']})
    write_json(
        "packages/cache/complete/installer.complete.latest.json",
        {'bootstrap': 'installer_bootstrap_id', 'packages': ['c--d', 'e--f']})
    write_json(
        "packages/cache/complete/ee.installer.complete.latest.json",
        {'bootstrap': 'installer_bootstrap_id', 'packages': []})

    return {
        None: {"bootstrap": "bootstrap_id", "packages": ["a--b", "c--d"]},
        "installer": {"bootstrap": "installer_bootstrap_id", "packages": ["c--d", "e--f"]},
        "ee.installer": {"bootstrap": "ee_installer_bootstrap_id", "packages": []}
    }
Example No. 11
def wait(master_count_filename):
    if try_shortcut():
        log.info("Shortcut succeeeded, assuming local zk is in good config state, not waiting for quorum.")
        return
    log.info('Shortcut failed, waiting for exhibitor to bring up zookeeper and stabilize')

    if not os.path.exists(master_count_filename):
        log.info("master_count file doesn't exist when it should. Hard failing.")
        sys.exit(1)

    cluster_size = int(utils.read_file_line(master_count_filename))
    log.info('Expected cluster size: {}'.format(cluster_size))

    log.info('Waiting for ZooKeeper cluster to stabilize')
    try:
        response = requests.get(EXHIBITOR_STATUS_URL)
    except requests.exceptions.ConnectionError as ex:
        log.error('Could not connect to exhibitor: {}'.format(ex))
        sys.exit(1)
    if response.status_code != 200:
        log.error('Could not get exhibitor status: {}, Status code: {}'.format(
            EXHIBITOR_STATUS_URL, response.status_code))
        sys.exit(1)

    data = response.json()

    serving = []
    leaders = []
    for node in data:
        if node['isLeader']:
            leaders.append(node['hostname'])
        if node['description'] == 'serving':
            serving.append(node['hostname'])

    log.info(
        "Serving hosts: `%s`, leader: `%s`", ','.join(serving), ','.join(leaders))

    if len(serving) != cluster_size or len(leaders) != 1:
        msg_fmt = 'Expected {} servers and 1 leader, got {} servers and {} leaders'
        raise Exception(msg_fmt.format(cluster_size, len(serving), len(leaders)))

    # Local ZooKeeper is up and the config should be stable. Stash the zk.pid mtime so that if
    # there is a restart we can come up quickly without requiring a new ZooKeeper quorum.
    zk_pid_mtime = get_zk_pid_mtime()
    if zk_pid_mtime is not None:
        log.info('Stashing zk.pid mtime %s to %s', zk_pid_mtime, stash_zk_pid_stat_mtime_path)
        write_string(stash_zk_pid_stat_mtime_path, str(zk_pid_mtime))
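
get_zk_pid_mtime and stash_zk_pid_stat_mtime_path come from the surrounding module. A minimal sketch of the mtime helper, assuming zk.pid lives at a known path (the path below is illustrative, not authoritative):

import os

ZK_PID_PATH = '/var/lib/dcos/exhibitor/zk.pid'  # illustrative path, not authoritative

def get_zk_pid_mtime():
    # Return the pid file's mtime, or None if ZooKeeper hasn't written it yet.
    try:
        return os.stat(ZK_PID_PATH).st_mtime
    except FileNotFoundError:
        return None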
Example No. 12
def mock_do_build_packages(cache_repository_url):
    subprocess.check_call(["mkdir", "-p", "packages/cache/bootstrap"])
    write_string("packages/cache/bootstrap/bootstrap_id.bootstrap.tar.xz", "bootstrap_contents")
    write_json("packages/cache/bootstrap/bootstrap_id.active.json", ["a--b", "c--d"])
    write_string("packages/cache/bootstrap/bootstrap.latest", "bootstrap_id")
    write_string("packages/cache/bootstrap/installer.bootstrap.latest", "installer_bootstrap_id")
    write_json("packages/cache/bootstrap/installer_bootstrap_id.active.json", ["c--d", "e--f"])
    write_string("packages/cache/bootstrap/ee.installer.bootstrap.latest", "ee_installer_bootstrap_id")
    write_json("packages/cache/bootstrap/ee_installer_bootstrap_id.active.json", [])

    return {None: "bootstrap_id", "installer": "installer_bootstrap_id", "ee.installer": "ee_installer_bootstrap_id"}
Example No. 13
def temp_data(key: str) -> (str, str):
    """ Provides file paths for data required to establish the SSH tunnel
    Args:
        key: string containing the private SSH key

    Returns:
        (path_for_temp_socket_file, path_for_temp_ssh_key)
    """
    temp_dir = tempfile.mkdtemp()
    socket_path = temp_dir + '/control_socket'
    key_path = temp_dir + '/key'
    write_string(key_path, key)
    os.chmod(key_path, stat.S_IREAD | stat.S_IWRITE)
    yield (socket_path, key_path)
    os.remove(key_path)
    # might have been deleted already if SSH exited correctly
    if os.path.exists(socket_path):
        os.remove(socket_path)
    os.rmdir(temp_dir)
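
temp_data is a generator, so callers presumably wrap it as a context manager (or use it as a pytest fixture). A sketch of how it could be consumed, assuming a contextlib.contextmanager wrapper around the generator above:

import contextlib

@contextlib.contextmanager
def _temp_data_cm(key):
    # Delegate to the temp_data generator so setup/teardown wrap the block.
    yield from temp_data(key)

with _temp_data_cm('-----BEGIN RSA PRIVATE KEY-----\n...') as (socket_path, key_path):
    # socket_path: where an SSH control socket can be created
    # key_path: restricted-permission file now holding the key text
    print(socket_path, key_path)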
Example No. 14
File: util.py Project: branden/dcos
def do_bundle_onprem(gen_out, output_dir):
    # We are only being called via dcos_generate_config.sh with an output_dir
    assert output_dir is not None
    assert output_dir
    assert output_dir[-1] != '/'
    output_dir = output_dir + '/'

    # Copy generated artifacts
    for filename in gen_out.channel_artifacts + gen_out.stable_artifacts:
        copy_makedirs(filename, output_dir + filename)

    # Write an index of the cluster packages
    write_json(output_dir + 'cluster-package-info.json', gen_out.cluster_packages)

    # Write the bootstrap id
    write_string(output_dir + 'bootstrap.latest', gen_out.arguments['bootstrap_id'])

    # Write cluster package list ID
    write_string(output_dir + 'cluster-package-list.latest', gen_out.arguments['cluster_package_list_id'])
Example No. 15
def do_bundle_onprem(extra_files, gen_out, output_dir):
    # We are only being called via dcos_generate_config.sh with an output_dir
    assert output_dir is not None
    assert output_dir
    assert output_dir[-1] != '/'
    output_dir = output_dir + '/'

    # Copy the extra_files
    for filename in extra_files:
        shutil.copy(filename, output_dir + filename)

    # Copy the cluster packages
    for name, info in gen_out.cluster_packages.items():
        copy_makedirs(info['filename'], output_dir + info['filename'])

    # Write an index of the cluster packages
    write_json(output_dir + 'cluster-package-info.json', gen_out.cluster_packages)

    # Write the bootstrap id
    write_string(output_dir + 'bootstrap.latest', gen_out.arguments['bootstrap_id'])
Example No. 16
File: util.py Project: mjkam/dcos
def do_bundle_onprem(extra_files, gen_out, output_dir):
    # We are only being called via dcos_generate_config.sh with an output_dir
    assert output_dir is not None
    assert output_dir
    assert output_dir[-1] != '/'
    output_dir = output_dir + '/'

    # Copy the extra_files
    for filename in extra_files:
        copy_makedirs(filename, output_dir + filename)

    # Copy the config packages
    for package_name in json.loads(gen_out.arguments['config_package_names']):
        filename = gen_out.cluster_packages[package_name]['filename']
        copy_makedirs(filename, output_dir + filename)

    # Write an index of the cluster packages
    write_json(output_dir + 'cluster-package-info.json', gen_out.cluster_packages)

    # Write the bootstrap id
    write_string(output_dir + 'bootstrap.latest', gen_out.arguments['bootstrap_id'])
Example No. 17
def test_action_deploy_retry(client, monkeypatch):
    route = '/api/v1/action/deploy'

    write_string('genconf/config.yaml', ssh_config_yaml)
    monkeypatch.setattr(dcos_installer.action_lib, '_get_bootstrap_tarball', lambda: '123')
    monkeypatch.setattr(dcos_installer.action_lib, '_get_cluster_package_list', lambda: '123')
    monkeypatch.setattr(dcos_installer.action_lib, '_add_copy_packages', lambda _: None)
    monkeypatch.setattr(dcos_installer.action_lib, '_read_state_file', lambda state_file: {'total_hosts': 2})

    removed_hosts = list()

    def mocked_remove_host(state_file, host):
        removed_hosts.append(host)

    monkeypatch.setattr(dcos_installer.action_lib, '_remove_host', mocked_remove_host)

    # Test retry
    def mocked_json_state(arg):
        return {
            'hosts': {
                '127.0.0.1:22': {
                    'host_status': 'failed',
                    'tags': {'role': 'master', 'dcos_install_param': 'master'},
                },
                '127.0.0.2:22022': {
                    'host_status': 'success'
                },
                '127.0.0.3:22022': {
                    'host_status': 'failed',
                    'tags': {'role': 'agent', 'dcos_install_param': 'slave'},
                }
            }
        }

    mock_json_state(monkeypatch, mocked_json_state)
    res = client.post(route, params={'retry': 'true'}, content_type='application/x-www-form-urlencoded')
    assert res.json == {'details': ['127.0.0.1:22', '127.0.0.3:22022'], 'status': 'retried'}
    assert len(set(removed_hosts)) == 2, \
        "Should have had two hosts removed exactly once, removed_hosts: {}".format(removed_hosts)
    assert set(removed_hosts) == {'127.0.0.3:22022', '127.0.0.1:22'}
Example No. 18
def mock_do_build_packages(cache_repository_url):
    subprocess.check_call(['mkdir', '-p', 'packages'])
    write_string("packages/bootstrap_id.bootstrap.tar.xz", "bootstrap_contents")
    write_json("packages/bootstrap_id.active.json", ['a--b', 'c--d'])
    write_string("packages/bootstrap.latest", "bootstrap_id")
    write_string("packages/installer.bootstrap.latest", "installer_bootstrap_id")
    write_json("packages/installer_bootstrap_id.active.json", ['c--d', 'e--f'])
    write_string("packages/ee.installer.bootstrap.latest", "ee_installer_bootstrap_id")
    write_json("packages/ee_installer_bootstrap_id.active.json", [])

    return {
        None: "bootstrap_id",
        "installer": "installer_bootstrap_id",
        "ee.installer": "ee_installer_bootstrap_id"
    }
Example No. 19
def mock_do_build_packages(cache_repository_url, skip_build):
    subprocess.check_call(['mkdir', '-p', 'packages'])
    write_string("packages/bootstrap_id.bootstrap.tar.xz",
                 "bootstrap_contents")
    write_json("packages/bootstrap_id.active.json", ['a--b', 'c--d'])
    write_string("packages/bootstrap.latest", "bootstrap_id")
    write_string("packages/installer.bootstrap.latest",
                 "installer_bootstrap_id")
    write_json("packages/installer_bootstrap_id.active.json", ['c--d', 'e--f'])
    write_string("packages/ee.installer.bootstrap.latest",
                 "ee_installer_bootstrap_id")
    write_json("packages/ee_installer_bootstrap_id.active.json", [])

    return {
        None: "bootstrap_id",
        "installer": "installer_bootstrap_id",
        "ee.installer": "ee_installer_bootstrap_id"
    }
Example No. 20
def build(package_store, name, variant, clean_after_build, recursive=False):
    assert isinstance(package_store, PackageStore)
    print("Building package {} variant {}".format(name, pkgpanda.util.variant_str(variant)))
    tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
    repository = Repository(tmpdir.name)

    package_dir = package_store.get_package_folder(name)

    def src_abs(name):
        return package_dir + '/' + name

    def cache_abs(filename):
        return package_store.get_package_cache_folder(name) + '/' + filename

    # Build pkginfo over time, translating fields from buildinfo.
    pkginfo = {}

    # Build up the docker command arguments over time, translating fields as needed.
    cmd = DockerCmd()

    assert (name, variant) in package_store.packages, \
        "Programming error: name, variant should have been validated to be valid before calling build()."

    builder = IdBuilder(package_store.get_buildinfo(name, variant))
    final_buildinfo = dict()

    builder.add('name', name)
    builder.add('variant', pkgpanda.util.variant_str(variant))

    # Convert single_source -> sources
    if builder.has('sources'):
        if builder.has('single_source'):
            raise BuildError('Both sources and single_source cannot be specified at the same time')
        sources = builder.take('sources')
    elif builder.has('single_source'):
        sources = {name: builder.take('single_source')}
        builder.replace('single_source', 'sources', sources)
    else:
        builder.add('sources', {})
        sources = dict()
        print("NOTICE: No sources specified")

    final_buildinfo['sources'] = sources

    # Construct the source fetchers, gather the checkout ids from them
    checkout_ids = dict()
    fetchers = dict()
    try:
        for src_name, src_info in sorted(sources.items()):
            # TODO(cmaloney): Switch to a unified top level cache directory shared by all packages
            cache_dir = package_store.get_package_cache_folder(name) + '/' + src_name
            check_call(['mkdir', '-p', cache_dir])
            fetcher = get_src_fetcher(src_info, cache_dir, package_dir)
            fetchers[src_name] = fetcher
            checkout_ids[src_name] = fetcher.get_id()
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    for src_name, checkout_id in checkout_ids.items():
        # NOTE: single_source buildinfo was expanded above so the src_name is
        # always correct here.
        # Make sure we never accidentally overwrite something which might be
        # important. Fields should match if specified (and that should be
        # tested at some point). For now, simply disallowing duplicates saves hassle.
        assert_no_duplicate_keys(checkout_id, final_buildinfo['sources'][src_name])
        final_buildinfo['sources'][src_name].update(checkout_id)

    # Add the sha1 of the buildinfo.json + build file to the build ids
    builder.update('sources', checkout_ids)
    build_script = src_abs(builder.take('build_script'))
    # TODO(cmaloney): Change dest name to build_script_sha1
    builder.replace('build_script', 'build', pkgpanda.util.sha1(build_script))
    builder.add('pkgpanda_version', pkgpanda.build.constants.version)

    extra_dir = src_abs("extra")
    # Add the "extra" folder inside the package as an additional source if it
    # exists
    if os.path.exists(extra_dir):
        extra_id = hash_folder(extra_dir)
        builder.add('extra_source', extra_id)
        final_buildinfo['extra_source'] = extra_id

    # Figure out the docker name.
    docker_name = builder.take('docker')
    cmd.container = docker_name

    # Add the id of the docker build environment to the build_ids.
    try:
        docker_id = get_docker_id(docker_name)
    except CalledProcessError:
        # docker pull the container and try again
        check_call(['docker', 'pull', docker_name])
        docker_id = get_docker_id(docker_name)

    builder.update('docker', docker_id)

    # TODO(cmaloney): The environment variables should be generated during build
    # not live in buildinfo.json.
    pkginfo['environment'] = builder.take('environment')

    # Whether pkgpanda should ensure a `/var/lib` state directory is available on the host
    pkginfo['state_directory'] = builder.take('state_directory')
    if pkginfo['state_directory'] not in [True, False]:
        raise BuildError("state_directory in buildinfo.json must be a boolean `true` or `false`")

    username = None
    if builder.has('username'):
        username = builder.take('username')
        if not isinstance(username, str):
            raise BuildError("username in buildinfo.json must be either not set (no user for this"
                             " package), or a user name string")
        try:
            pkgpanda.UserManagement.validate_username(username)
        except ValidationError as ex:
            raise BuildError("username in buildinfo.json didn't meet the validation rules. {}".format(ex))
        pkginfo['username'] = username

    group = None
    if builder.has('group'):
        group = builder.take('group')
        if not isinstance(group, str):
            raise BuildError("group in buildinfo.json must be either not set (use default group for this user)"
                             ", or group must be a string")
        try:
            pkgpanda.UserManagement.validate_group_name(group)
        except ValidationError as ex:
            raise BuildError("group in buildinfo.json didn't meet the validation rules. {}".format(ex))
        pkginfo['group'] = group

    # Packages need directories inside the fake install root (otherwise docker
    # will try making the directories on a readonly filesystem), so build the
    # install root now, and make the package directories in it as we go.
    install_dir = tempfile.mkdtemp(prefix="pkgpanda-")

    active_packages = list()
    active_package_ids = set()
    active_package_variants = dict()
    auto_deps = set()

    # Final package has the same requires as the build.
    requires = builder.take('requires')
    pkginfo['requires'] = requires

    if builder.has("sysctl"):
        pkginfo["sysctl"] = builder.take("sysctl")

    # TODO(cmaloney): Pull generating the full set of requires out into a function.
    to_check = copy.deepcopy(requires)
    if type(to_check) != list:
        raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
    while to_check:
        requires_info = to_check.pop(0)
        requires_name, requires_variant = expand_require(requires_info)

        if requires_name in active_package_variants:
            # TODO(cmaloney): If one package depends on the <default>
            # variant of a package and 1+ others depends on a non-<default>
            # variant then update the dependency to the non-default variant
            # rather than erroring.
            if requires_variant != active_package_variants[requires_name]:
                # TODO(cmaloney): Make this contain the chains of
                # dependencies which contain the conflicting packages.
                # a -> b -> c -> d {foo}
                # e {bar} -> d {baz}
                raise BuildError(
                    "Dependncy on multiple variants of the same package {}. variants: {} {}".format(
                        requires_name,
                        requires_variant,
                        active_package_variants[requires_name]))

            # The package {requires_name, variant} is already a dependency;
            # don't process it again, move on to the next.
            continue

        active_package_variants[requires_name] = requires_variant

        # Figure out the last build of the dependency, add that as the
        # fully expanded dependency.
        requires_last_build = package_store.get_last_build_filename(requires_name, requires_variant)
        if not os.path.exists(requires_last_build):
            if recursive:
                # Build the dependency
                build(package_store, requires_name, requires_variant, clean_after_build, recursive)
            else:
                raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
                                 "the dependency".format(requires_name, requires_variant))

        try:
            pkg_id_str = load_string(requires_last_build)
            auto_deps.add(pkg_id_str)
            pkg_buildinfo = package_store.get_buildinfo(requires_name, requires_variant)
            pkg_requires = pkg_buildinfo['requires']
            pkg_path = repository.package_path(pkg_id_str)
            pkg_tar = pkg_id_str + '.tar.xz'
            if not os.path.exists(package_store.get_package_cache_folder(requires_name) + '/' + pkg_tar):
                raise BuildError(
                    "The build tarball {} refered to by the last_build file of the dependency {} "
                    "variant {} doesn't exist. Rebuild the dependency.".format(
                        pkg_tar,
                        requires_name,
                        requires_variant))

            active_package_ids.add(pkg_id_str)

            # Mount the package into the docker container.
            cmd.volumes[pkg_path] = "/opt/mesosphere/packages/{}:ro".format(pkg_id_str)
            os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))

            # Add the dependencies of the package to the set which will be
            # activated.
            # TODO(cmaloney): All these 'transitive' dependencies shouldn't
            # be available to the package being built, only what depends on
            # them directly.
            to_check += pkg_requires
        except ValidationError as ex:
            raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
        except PackageError as ex:
            raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex

    # Add requires to the package id, calculate the final package id.
    # NOTE: active_packages isn't fully constructed here since we lazily load
    # packages not already in the repository.
    builder.update('requires', list(active_package_ids))
    version_extra = None
    if builder.has('version_extra'):
        version_extra = builder.take('version_extra')

    build_ids = builder.get_build_ids()
    version_base = hash_checkout(build_ids)
    version = None
    if builder.has('version_extra'):
        version = "{0}-{1}".format(version_extra, version_base)
    else:
        version = version_base
    pkg_id = PackageId.from_parts(name, version)

    # Everything must have been extracted by now. If it wasn't, we would have
    # hit a hard error for a field that was set but not used, and it wouldn't
    # have been included in the calculation of the PackageId.
    builder = None

    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
    final_buildinfo['build_ids'] = build_ids
    final_buildinfo['package_version'] = version

    # Save the package name and variant. The variant is used when installing
    # packages to validate dependencies.
    final_buildinfo['name'] = name
    final_buildinfo['variant'] = variant

    # If the package is already built, don't do anything.
    pkg_path = package_store.get_package_cache_folder(name) + '/{}.tar.xz'.format(pkg_id)

    # Done if it exists locally
    if exists(pkg_path):
        print("Package up to date. Not re-building.")

        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))

        return pkg_path

    # Try downloading.
    dl_path = package_store.try_fetch_by_id(pkg_id)
    if dl_path:
        print("Package up to date. Not re-building. Downloaded from repository-url.")
        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
        print(dl_path, pkg_path)
        assert dl_path == pkg_path
        return pkg_path

    # Fall out and do the build since it couldn't be downloaded
    print("Unable to download from cache. Proceeding to build")

    print("Building package {} with buildinfo: {}".format(
        pkg_id,
        json.dumps(final_buildinfo, indent=2, sort_keys=True)))

    # Clean out src, result so later steps can use them freely for building.
    def clean():
        # Run a docker container to remove src/ and result/
        cmd = DockerCmd()
        cmd.volumes = {
            package_store.get_package_cache_folder(name): "/pkg/:rw",
        }
        cmd.container = "ubuntu:14.04.4"
        cmd.run("package-cleaner", ["rm", "-rf", "/pkg/src", "/pkg/result"])

    clean()

    # Only fresh builds are allowed which don't overlap existing artifacts.
    result_dir = cache_abs("result")
    if exists(result_dir):
        raise BuildError("result folder must not exist. It will be made when the package is "
                         "built. {}".format(result_dir))

    # 'mkpanda add' all implicit dependencies since we actually need to build.
    for dep in auto_deps:
        print("Auto-adding dependency: {}".format(dep))
        # NOTE: Not using the name pkg_id because that would shadow the outer one.
        id_obj = PackageId(dep)
        add_package_file(repository, package_store.get_package_path(id_obj))
        package = repository.load(dep)
        active_packages.append(package)

    # Check out all the sources into their respective 'src/' folders.
    try:
        src_dir = cache_abs('src')
        if os.path.exists(src_dir):
            raise ValidationError(
                "'src' directory already exists, did you have a previous build? " +
                "Currently all builds must be from scratch. Support should be " +
                "added for re-using a src directory when possible. src={}".format(src_dir))
        os.mkdir(src_dir)
        for src_name, fetcher in sorted(fetchers.items()):
            root = cache_abs('src/' + src_name)
            os.mkdir(root)

            fetcher.checkout_to(root)
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    # Activate the packages so that we have a proper path, environment
    # variables.
    # TODO(cmaloney): RAII type thing for temporary directory so if we
    # don't get all the way through things will be cleaned up?
    install = Install(
        root=install_dir,
        config_dir=None,
        rooted_systemd=True,
        manage_systemd=False,
        block_systemd=True,
        fake_path=True,
        manage_users=False,
        manage_state_dir=False)
    install.activate(active_packages)
    # Rewrite all the symlinks inside the active path because we will
    # be mounting the folder into a docker container, and the absolute
    # paths to the packages will change.
    # TODO(cmaloney): This isn't very clean, it would be much nicer to
    # just run pkgpanda inside the package.
    rewrite_symlinks(install_dir, repository.path, "/opt/mesosphere/packages/")

    print("Building package in docker")

    # TODO(cmaloney): Run as a specific non-root user, make it possible
    # for non-root to cleanup afterwards.
    # Run the build, prepping the environment as necessary.
    mkdir(cache_abs("result"))

    # Copy the build info to the resulting tarball
    write_json(cache_abs("src/buildinfo.full.json"), final_buildinfo)
    write_json(cache_abs("result/buildinfo.full.json"), final_buildinfo)

    write_json(cache_abs("result/pkginfo.json"), pkginfo)

    # Make the folder for the package we are building. If docker does it, it
    # gets auto-created with root permissions and we can't actually delete it.
    os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))

    # TODO(cmaloney): Disallow writing to well known files and directories?
    # Source we checked out
    cmd.volumes.update({
        # TODO(cmaloney): src should be read only...
        cache_abs("src"): "/pkg/src:rw",
        # The build script
        build_script: "/pkg/build:ro",
        # Getting the result out
        cache_abs("result"): "/opt/mesosphere/packages/{}:rw".format(pkg_id),
        install_dir: "/opt/mesosphere:ro"
    })

    if os.path.exists(extra_dir):
        cmd.volumes[extra_dir] = "/pkg/extra:ro"

    cmd.environment = {
        "PKG_VERSION": version,
        "PKG_NAME": name,
        "PKG_ID": pkg_id,
        "PKG_PATH": "/opt/mesosphere/packages/{}".format(pkg_id),
        "PKG_VARIANT": variant if variant is not None else "<default>",
        "NUM_CORES": multiprocessing.cpu_count()
    }

    try:
        # TODO(cmaloney): Run a wrapper which sources
        # /opt/mesosphere/environment then runs a build. Also should fix
        # ownership of /opt/mesosphere/packages/{pkg_id} post build.
        cmd.run("package-builder", [
            "/bin/bash",
            "-o", "nounset",
            "-o", "pipefail",
            "-o", "errexit",
            "/pkg/build"])
    except CalledProcessError as ex:
        raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))

    # Clean up the temporary install dir used for dependencies.
    # TODO(cmaloney): Move to an RAII wrapper.
    check_call(['rm', '-rf', install_dir])

    print("Building package tarball")

    # Check for forbidden services before packaging the tarball:
    try:
        check_forbidden_services(cache_abs("result"), RESERVED_UNIT_NAMES)
    except ValidationError as ex:
        raise BuildError("Package validation failed: {}".format(ex))

    # TODO(cmaloney): Updating / filling last_build should be moved out of
    # the build function.
    write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))

    # Bundle the artifacts into the pkgpanda package
    tmp_name = pkg_path + "-tmp.tar.xz"
    make_tar(tmp_name, cache_abs("result"))
    os.rename(tmp_name, pkg_path)
    print("Package built.")
    if clean_after_build:
        clean()
    return pkg_path
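
build() records the id of a finished package with write_string(package_store.get_last_build_filename(...), str(pkg_id)) and later reads it back with load_string when resolving dependencies. A minimal sketch of the reader side, assuming it simply returns the file contents with surrounding whitespace stripped (the real pkgpanda.util.load_string may differ):

def load_string(filename):
    # Read a small marker file such as last_build and return its trimmed contents.
    with open(filename) as f:
        return f.read().strip()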
Example No. 21
    def activate(self, packages):
        # Ensure the new set is reasonable.
        validate_compatible(packages, self.__roles)

        # Build the absolute paths for the running config, new config location,
        # and where to archive the config.
        active_names = self.get_active_names()
        active_dirs = list(map(self._make_abs, self.__well_known_dirs + ["active"]))

        new_names = [name + ".new" for name in active_names]
        new_dirs = [name + ".new" for name in active_dirs]

        old_names = [name + ".old" for name in active_names]

        # Remove all pre-existing new and old directories
        for name in chain(new_names, old_names):
            if os.path.exists(name):
                if os.path.isdir(name):
                    shutil.rmtree(name)
                else:
                    os.remove(name)

        # Make the directories for the new config
        for name in new_dirs:
            os.makedirs(name)

        def symlink_all(src, dest):
            if not os.path.isdir(src):
                return

            symlink_tree(src, dest)

        # Set the new LD_LIBRARY_PATH, PATH.
        env_contents = env_header.format("/opt/mesosphere" if self.__fake_path else self.__root)
        env_export_contents = env_export_header.format("/opt/mesosphere" if self.__fake_path else self.__root)

        active_buildinfo_full = {}

        dcos_service_configuration = self._get_dcos_configuration_template()

        # Building up the set of users
        sysusers = UserManagement(self.__manage_users, self.__add_users)

        def _get_service_files(_dir):
            service_files = []
            for root, directories, filenames in os.walk(_dir):
                for filename in filter(lambda name: name.endswith(".service"), filenames):
                    service_files.append(os.path.join(root, filename))
            return service_files

        def _get_service_names(_dir):
            service_files = list(map(os.path.basename, _get_service_files(_dir)))

            if not service_files:
                return []

            return list(map(lambda name: os.path.splitext(name)[0], service_files))

        # Add the folders, config in each package.
        for package in packages:
            # Package folders
            # NOTE: Since active is at the end of the folder list it gets
            # dropped by zip(), which truncates to the shorter list. This is the
            # desired behavior, since it will be populated later.
            # Do the basename since some well known dirs are full paths (dcos.target.wants)
            # while inside the packages they are always top level directories.
            for new, dir_name in zip(new_dirs, self.__well_known_dirs):
                dir_name = os.path.basename(dir_name)
                pkg_dir = os.path.join(package.path, dir_name)

                assert os.path.isabs(new)
                assert os.path.isabs(pkg_dir)

                try:
                    symlink_all(pkg_dir, new)

                    # Symlink all applicable role-based config
                    for role in self.__roles:
                        role_dir = os.path.join(package.path, "{0}_{1}".format(dir_name, role))
                        symlink_all(role_dir, new)

                except ConflictingFile as ex:
                    raise ValidationError("Two packages are trying to install the same file {0} or "
                                          "two roles in the set of roles {1} are causing a package "
                                          "to try activating multiple versions of the same file. "
                                          "One of the package files is {2}.".format(ex.dest,
                                                                                    self.__roles,
                                                                                    ex.src))

            # Add to the active folder
            os.symlink(package.path, os.path.join(self._make_abs("active.new"), package.name))

            # Add to the environment and environment.export contents

            env_contents += "# package: {0}\n".format(package.id)
            env_export_contents += "# package: {0}\n".format(package.id)

            for k, v in package.environment.items():
                env_contents += "{0}={1}\n".format(k, v)
                env_export_contents += "export {0}={1}\n".format(k, v)

            env_contents += "\n"
            env_export_contents += "\n"

            # Add to the buildinfo
            try:
                active_buildinfo_full[package.name] = load_json(os.path.join(package.path, "buildinfo.full.json"))
            except FileNotFoundError:
                # TODO(cmaloney): These only come from setup-packages. Should update
                # setup-packages to add a buildinfo.full for those packages
                active_buildinfo_full[package.name] = None

            # NOTE: It is critical the state dir, the package name and the user name are all the
            # same. Otherwise on upgrades we might remove access to files by changing their ownership
            # to something incompatible. We survive the first upgrade because everything goes from
            # root to specific users, and root can access all user files.
            if package.username is not None:
                sysusers.add_user(package.username, package.group)

            # Ensure the state directory exists
            # TODO(cmaloney): On upgrade take a snapshot?
            if self.__manage_state_dir:
                state_dir_path = self.__state_dir_root + '/' + package.name
                if package.state_directory:
                    check_call(['mkdir', '-p', state_dir_path])

                    if package.username:
                        uid = sysusers.get_uid(package.username)
                        check_call(['chown', '-R', str(uid), state_dir_path])

            if package.sysctl:
                service_names = _get_service_names(package.path)

                if not service_names:
                    raise ValueError("service name required for sysctl could not be determined for {package}".format(
                        package=package.id))

                for service in service_names:
                    if service in package.sysctl:
                        dcos_service_configuration["sysctl"][service] = package.sysctl[service]

        dcos_service_configuration_file = os.path.join(self._make_abs("etc.new"), DCOS_SERVICE_CONFIGURATION_FILE)
        write_json(dcos_service_configuration_file, dcos_service_configuration)

        # Write out the new environment file.
        new_env = self._make_abs("environment.new")
        write_string(new_env, env_contents)

        # Write out the new environment.export file
        new_env_export = self._make_abs("environment.export.new")
        write_string(new_env_export, env_export_contents)

        # Write out the buildinfo of every active package
        new_buildinfo_meta = self._make_abs("active.buildinfo.full.json.new")
        write_json(new_buildinfo_meta, active_buildinfo_full)

        self.swap_active(".new")
Example No. 22
def make_bootstrap_tarball(package_store, packages, variant):
    # Convert filenames to package ids
    pkg_ids = list()
    for pkg_path in packages:
        # Get the package id from the given package path
        filename = os.path.basename(pkg_path)
        if not filename.endswith(".tar.xz"):
            raise BuildError("Packages must be packaged / end with a .tar.xz. Got {}".format(filename))
        pkg_id = filename[:-len(".tar.xz")]
        pkg_ids.append(pkg_id)

    bootstrap_cache_dir = package_store.get_bootstrap_cache_dir()

    # Filenames are <sha-1 bootstrap_id>.{active.json|bootstrap.tar.xz}
    bootstrap_id = hash_checkout(pkg_ids)
    latest_name = "{}/{}bootstrap.latest".format(bootstrap_cache_dir, pkgpanda.util.variant_prefix(variant))

    output_name = bootstrap_cache_dir + '/' + bootstrap_id + '.'

    # bootstrap tarball = <sha1 of packages in tarball>.bootstrap.tar.xz
    bootstrap_name = "{}bootstrap.tar.xz".format(output_name)
    active_name = "{}active.json".format(output_name)

    def mark_latest():
        # Ensure latest is always written
        write_string(latest_name, bootstrap_id)

        print("bootstrap: {}".format(bootstrap_name))
        print("active: {}".format(active_name))
        print("latest: {}".format(latest_name))
        return bootstrap_id

    if os.path.exists(bootstrap_name):
        print("Bootstrap already up to date, not recreating")
        return mark_latest()

    check_call(['mkdir', '-p', bootstrap_cache_dir])

    # Try downloading.
    if package_store.try_fetch_bootstrap_and_active(bootstrap_id):
        print("Bootstrap already up to date, Not recreating. Downloaded from repository-url.")
        return mark_latest()

    print("Unable to download from cache. Building.")

    print("Creating bootstrap tarball for variant {}".format(variant))

    work_dir = tempfile.mkdtemp(prefix='mkpanda_bootstrap_tmp')

    def make_abs(path):
        return os.path.join(work_dir, path)

    pkgpanda_root = make_abs("opt/mesosphere")
    repository = Repository(os.path.join(pkgpanda_root, "packages"))

    # Fetch all the packages to the root
    for pkg_path in packages:
        filename = os.path.basename(pkg_path)
        pkg_id = filename[:-len(".tar.xz")]

        def local_fetcher(id, target):
            shutil.unpack_archive(pkg_path, target, "xztar")
        repository.add(local_fetcher, pkg_id, False)

    # Activate the packages inside the repository.
    # Do generate dcos.target.wants inside the root so that we don't
    # try messing with /etc/systemd/system.
    install = Install(
        root=pkgpanda_root,
        config_dir=None,
        rooted_systemd=True,
        manage_systemd=False,
        block_systemd=True,
        fake_path=True,
        skip_systemd_dirs=True,
        manage_users=False,
        manage_state_dir=False)
    install.activate(repository.load_packages(pkg_ids))

    # Mark the tarball as a bootstrap tarball/filesystem so that
    # dcos-setup.service will fire.
    make_file(make_abs("opt/mesosphere/bootstrap"))

    # Write out an active.json for the bootstrap tarball
    write_json(active_name, pkg_ids)

    # Rewrite all the symlinks to point to /opt/mesosphere
    rewrite_symlinks(work_dir, work_dir, "/")

    make_tar(bootstrap_name, pkgpanda_root)

    shutil.rmtree(work_dir)

    # Update latest last so that we don't ever use partially-built things.
    write_string(latest_name, bootstrap_id)

    print("Built bootstrap")
    return mark_latest()
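
pkgpanda.util.variant_prefix (and the related variant_str used in Example No. 20) encode the variant naming convention visible in the mock examples above ('bootstrap.latest', 'installer.bootstrap.latest', 'ee.installer.bootstrap.latest'). A sketch of that convention, as inferred from those filenames rather than from the library source:

def variant_str(variant):
    # The default variant is represented by None and stringifies to ''.
    return '' if variant is None else variant

def variant_prefix(variant):
    # Named variants prefix generated filenames as "<variant>.".
    return '' if variant is None else variant + '.'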
Example No. 23
def integration_test(
        tunnel, test_dir,
        dcos_dns, master_list, agent_list, public_agent_list,
        test_dns_search, provider, ci_flags, timeout=None,
        aws_access_key_id='', aws_secret_access_key='', region='', add_env=None):
    """Runs integration test on host

    Args:
        test_dir: directory on the test host where integration_test.py will be placed
        dcos_dns: string representing IP of DCOS DNS host
        master_list: list of master addresses
        agent_list: list of agent addresses
        test_dns_search: if set to True, test for deployed mesos DNS app
        ci_flags: optional additional string to be passed to test
        provider: (str) either onprem, aws, or azure
        # The following variables correspond to currently disabled tests
        aws_access_key_id: needed for REXRAY tests
        aws_secret_access_key: needed for REXRAY tests
        region: string indicating AWS region in which cluster is running
        add_env: a python dict with any number of key=value assignments to be passed to
            the test environment
    """
    test_script = pkg_filename('integration_test.py')
    pytest_docker = pkg_filename('docker/py.test/Dockerfile')

    dns_search = 'true' if test_dns_search else 'false'
    test_env = [
        'DCOS_DNS_ADDRESS=http://'+dcos_dns,
        'MASTER_HOSTS='+','.join(master_list),
        'PUBLIC_MASTER_HOSTS='+','.join(master_list),
        'SLAVE_HOSTS='+','.join(agent_list),
        'PUBLIC_SLAVE_HOSTS='+','.join(public_agent_list),
        'REGISTRY_HOST=127.0.0.1',
        'DCOS_PROVIDER='+provider,
        'DNS_SEARCH='+dns_search,
        'AWS_ACCESS_KEY_ID='+aws_access_key_id,
        'AWS_SECRET_ACCESS_KEY='+aws_secret_access_key,
        'AWS_REGION='+region]
    if add_env:
        for key, value in add_env.items():
            extra_env = key + '=' + value
            test_env.append(extra_env)
    test_env = ['export '+e+'\n' for e in test_env]
    test_env = ''.join(test_env)
    test_cmd = 'py.test -vv ' + ci_flags + ' /integration_test.py'

    log.info('Building py.test image')
    # Make a clean docker context
    temp_dir = tempfile.mkdtemp()
    cmd_script = """
#!/bin/bash
set -euo pipefail; set -x
{test_env}
{test_cmd}
""".format(test_env=test_env, test_cmd=test_cmd)
    write_string(join(temp_dir, 'test_wrapper.sh'), cmd_script)
    check_call(['cp', test_script, join(temp_dir, 'integration_test.py')])
    check_call(['cp', pytest_docker, join(temp_dir, 'Dockerfile')])
    check_call(['docker', 'build', '-t', 'py.test', temp_dir])

    log.info('Exporting py.test image')
    pytest_image_tar = 'DCOS_integration_test.tar'
    check_call(['docker', 'save', '-o', join(temp_dir, pytest_image_tar), 'py.test'])

    log.info('Transferring py.test image')
    tunnel.write_to_remote(join(temp_dir, pytest_image_tar), join(test_dir, pytest_image_tar))
    log.info('Loading py.test image on remote host')
    tunnel.remote_cmd(['docker', 'load', '-i', join(test_dir, pytest_image_tar)])

    test_container_name = 'int_test_' + str(int(time.time()))
    docker_cmd = ['docker', 'run', '--net=host', '--name='+test_container_name, 'py.test']
    try:
        with remote_port_forwarding(tunnel, agent_list+public_agent_list, join(test_dir, 'ssh_key')):
            log.info('Running integration test...')
            try:
                tunnel.remote_cmd(docker_cmd, timeout=timeout)
            except CalledProcessError:
                log.exception('Test failed!')
                if ci_flags:
                    return False
                raise
        log.info('Successful test run!')
    except TimeoutExpired:
        log.error('Test failed due to timing out after {} seconds'.format(timeout))
        raise
    finally:
        get_logs_cmd = ['docker', 'logs', test_container_name]
        test_log = tunnel.remote_cmd(get_logs_cmd)
        log_file = 'integration_test.log'
        with open(log_file, 'wb') as fh:
            fh.write(test_log)
        log.info('Logs from test container can be found in '+log_file)

    return True
Example No. 24
def integration_test(
        tunnel, test_dir,
        dcos_dns, master_list, agent_list, public_agent_list,
        variant, test_dns_search, provider, ci_flags, timeout=None,
        aws_access_key_id='', aws_secret_access_key='', region='', add_env=None):
    """Runs integration test on host

    Args:
        test_dir: directory on the test host where integration_test.py will be placed
        dcos_dns: string representing IP of DCOS DNS host
        master_list: list of master addresses
        agent_list: list of agent addresses
        variant: 'ee' or 'default'
        test_dns_search: if set to True, test for deployed mesos DNS app
        ci_flags: optional additional string to be passed to test
        provider: (str) either onprem, aws, or azure
        # The following variables correspond to currently disabled tests
        aws_access_key_id: needed for REXRAY tests
        aws_secret_access_key: needed for REXRAY tests
        region: string indicating AWS region in which cluster is running
        add_env: a python dict with any number of key=value assignments to be passed to
            the test environment
    """
    test_script = pkg_filename('integration_test.py')
    pytest_docker = pkg_filename('docker/py.test/Dockerfile')

    dns_search = 'true' if test_dns_search else 'false'
    test_env = [
        'DCOS_DNS_ADDRESS=http://'+dcos_dns,
        'MASTER_HOSTS='+','.join(master_list),
        'PUBLIC_MASTER_HOSTS='+','.join(master_list),
        'SLAVE_HOSTS='+','.join(agent_list),
        'PUBLIC_SLAVE_HOSTS='+','.join(public_agent_list),
        'REGISTRY_HOST=127.0.0.1',
        'DCOS_VARIANT='+variant,
        'DCOS_PROVIDER='+provider,
        'DNS_SEARCH='+dns_search,
        'AWS_ACCESS_KEY_ID='+aws_access_key_id,
        'AWS_SECRET_ACCESS_KEY='+aws_secret_access_key,
        'AWS_REGION='+region]
    if add_env:
        for key, value in add_env.items():
            extra_env = key + '=' + value
            test_env.append(extra_env)
    test_env = ['export '+e+'\n' for e in test_env]
    test_env = ''.join(test_env)
    test_cmd = 'py.test -vv ' + ci_flags + ' /integration_test.py'

    log.info('Building py.test image')
    # Make a clean docker context
    temp_dir = tempfile.mkdtemp()
    cmd_script = """
#!/bin/bash
set -euo pipefail; set -x
{test_env}
{test_cmd}
""".format(test_env=test_env, test_cmd=test_cmd)
    write_string(join(temp_dir, 'test_wrapper.sh'), cmd_script)
    check_call(['cp', test_script, join(temp_dir, 'integration_test.py')])
    check_call(['cp', pytest_docker, join(temp_dir, 'Dockerfile')])
    check_call(['docker', 'build', '-t', 'py.test', temp_dir])

    log.info('Exporting py.test image')
    pytest_image_tar = 'DCOS_integration_test.tar'
    check_call(['docker', 'save', '-o', join(temp_dir, pytest_image_tar), 'py.test'])

    log.info('Transferring py.test image')
    tunnel.write_to_remote(join(temp_dir, pytest_image_tar), join(test_dir, pytest_image_tar))
    log.info('Loading py.test image on remote host')
    tunnel.remote_cmd(['docker', 'load', '-i', join(test_dir, pytest_image_tar)])

    test_container_name = 'int_test_' + str(int(time.time()))
    docker_cmd = ['docker', 'run', '--net=host', '--name='+test_container_name, 'py.test']
    try:
        with remote_port_forwarding(tunnel, agent_list+public_agent_list, join(test_dir, 'ssh_key')):
            log.info('Running integration test...')
            try:
                tunnel.remote_cmd(docker_cmd, timeout=timeout)
            except CalledProcessError:
                log.exception('Test failed!')
                if ci_flags:
                    return False
                raise
        log.info('Successful test run!')
    except TimeoutExpired:
        log.error('Test failed due to timing out after {} seconds'.format(timeout))
        raise
    finally:
        get_logs_cmd = ['docker', 'logs', test_container_name]
        test_log = tunnel.remote_cmd(get_logs_cmd)
        log_file = 'integration_test.log'
        with open(log_file, 'wb') as fh:
            fh.write(test_log)
        log.info('Logs from test container can be found in '+log_file)

    return True
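
For orientation, a hedged usage sketch of this variant of integration_test; the tunnel object, directory, host addresses, and extra environment value below are hypothetical placeholders, not values from the source.

# Hypothetical invocation. `tunnel` is assumed to be an SSH tunnel object exposing
# write_to_remote() and remote_cmd() as used in the function body above.
passed = integration_test(
    tunnel=tunnel,
    test_dir='/home/centos',
    dcos_dns='10.0.0.10',
    master_list=['10.0.0.10'],
    agent_list=['10.0.0.11', '10.0.0.12'],
    public_agent_list=['10.0.0.13'],
    variant='default',
    test_dns_search=True,
    provider='onprem',
    ci_flags='',
    timeout=5400,
    add_env={'EXTRA_FLAG': '1'})  # each entry is exported into the test shell
if not passed:
    raise SystemExit('integration test failed')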
Exemplo n.º 25
0
def main():
    options = check_environment()

    unique_cluster_id = "dcos-ci-test-onprem-{}".format(random_id(10))
    log.info("Spinning up AWS VPC with ID: {}".format(unique_cluster_id))
    if options.test_install_prereqs:
        os_name = "cent-os-7"
    else:
        os_name = "cent-os-7-dcos-prereqs"
    bw = test_util.aws.BotoWrapper(
        region=DEFAULT_AWS_REGION,
        aws_access_key_id=options.aws_access_key_id,
        aws_secret_access_key=options.aws_secret_access_key)
    ssh_key = bw.create_key_pair(unique_cluster_id)
    # Drop the key to disk so CI can cache it as an artifact
    write_string('ssh_key', ssh_key)

    if options.cluster_ami:
        vpc = test_util.aws.VpcCfStack.create_from_ami(
            stack_name=unique_cluster_id,
            instance_type=options.instance_type,
            instance_ami=options.cluster_ami,
            # An instance for each cluster node plus the bootstrap.
            instance_count=(options.masters + options.agents + options.public_agents + 1),
            admin_location='0.0.0.0/0',
            key_pair_name=unique_cluster_id,
            boto_wrapper=bw)
        ssh_info = SshInfo(user=options.cluster_ami_ssh_user, home_dir=options.cluster_ami_ssh_home_dir)
    else:
        vpc, ssh_info = test_util.aws.VpcCfStack.create(
            stack_name=unique_cluster_id,
            instance_type=options.instance_type,
            instance_os=os_name,
            # An instance for each cluster node plus the bootstrap.
            instance_count=(options.masters + options.agents + options.public_agents + 1),
            admin_location='0.0.0.0/0',
            key_pair_name=unique_cluster_id,
            boto_wrapper=bw)
    vpc.wait_for_complete()

    cluster = test_util.cluster.Cluster.from_vpc(
        vpc,
        ssh_info,
        ssh_key=ssh_key,
        num_masters=options.masters,
        num_agents=options.agents,
        num_public_agents=options.public_agents,
    )

    test_util.cluster.install_dcos(
        cluster,
        installer_url=options.installer_url,
        setup=options.do_setup,
        api=options.use_api,
        add_config_path=options.add_config_path,
        # If we don't want to test the prereq install, use offline mode to avoid it.
        installer_api_offline_mode=(not options.test_install_prereqs),
        install_prereqs=options.test_install_prereqs,
        install_prereqs_only=options.test_install_prereqs_only,
    )

    if options.test_install_prereqs and options.test_install_prereqs_only:
        # install_dcos() exited after running prereqs, so we're done.
        vpc.delete()
        bw.delete_key_pair(unique_cluster_id)
        sys.exit(0)

    result = test_util.cluster.run_integration_tests(
        cluster,
        # Setting dns_search: mesos not currently supported in API
        region=DEFAULT_AWS_REGION,
        aws_access_key_id=options.aws_access_key_id,
        aws_secret_access_key=options.aws_secret_access_key,
        test_cmd=options.test_cmd,
    )

    if result == 0:
        log.info("Test successful! Deleting VPC if provided in this run.")
        vpc.delete()
        bw.delete_key_pair(unique_cluster_id)
    else:
        log.info("Test failed! VPC will remain for debugging 1 hour from instantiation")
    if options.ci_flags:
        result = 0  # Wipe the return code so that tests can be muted in CI
    sys.exit(result)
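
The SshInfo object built above is consumed only through its user and home_dir attributes; a minimal stand-in for such a container, assuming a simple namedtuple (the real definition lives elsewhere in the repository and may carry more fields):

from collections import namedtuple

# Minimal stand-in for the SshInfo container used above; only the two fields that
# the surrounding code reads are modeled here.
SshInfo = namedtuple('SshInfo', ['user', 'home_dir'])

ssh_info = SshInfo(user='centos', home_dir='/home/centos')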
Exemplo n.º 26
0
    def activate(self, packages):
        # Ensure the new set is reasonable.
        validate_compatible(packages, self.__roles)

        # Build the absolute paths for the running config, new config location,
        # and where to archive the config.
        active_names = self.get_active_names()
        active_dirs = list(map(self._make_abs, self.__well_known_dirs + ["active"]))

        new_names = [name + ".new" for name in active_names]
        new_dirs = [name + ".new" for name in active_dirs]

        old_names = [name + ".old" for name in active_names]

        # Remove all pre-existing new and old directories
        for name in chain(new_names, old_names):
            if os.path.exists(name):
                if os.path.isdir(name):
                    shutil.rmtree(name)
                else:
                    os.remove(name)

        # Make the directories for the new config
        for name in new_dirs:
            os.makedirs(name)

        # Fill in all the new contents
        def symlink_all(src, dest):
            if not os.path.isdir(src):
                return

            symlink_tree(src, dest)

        # Set the new LD_LIBRARY_PATH, PATH.
        env_contents = env_header.format("/opt/mesosphere" if self.__fake_path else self.__root)
        env_export_contents = env_export_header.format("/opt/mesosphere" if self.__fake_path else self.__root)

        active_buildinfo_full = {}

        # Add the folders, config in each package.
        for package in packages:
            # Package folders
            # NOTE: Since 'active' is at the end of the folder list, it is dropped
            # by the zip() below. This is the desired behavior, since it gets
            # populated later.
            # Do the basename since some well known dirs are full paths (dcos.target.wants)
            # while inside the packages they are always top level directories.
            for new, dir_name in zip(new_dirs, self.__well_known_dirs):
                dir_name = os.path.basename(dir_name)
                pkg_dir = os.path.join(package.path, dir_name)
                assert os.path.isabs(new)
                assert os.path.isabs(pkg_dir)

                try:
                    symlink_all(pkg_dir, new)

                    # Symlink all applicable role-based config
                    for role in self.__roles:
                        role_dir = os.path.join(package.path, "{0}_{1}".format(dir_name, role))
                        symlink_all(role_dir, new)
                except ConflictingFile as ex:
                    raise ValidationError("Two packages are trying to install the same file {0} or "
                                          "two roles in the set of roles {1} are causing a package "
                                          "to try activating multiple versions of the same file. "
                                          "One of the package files is {2}.".format(
                                            ex.dest,
                                            self.__roles,
                                            ex.src))

            # Add to the active folder
            os.symlink(package.path, os.path.join(self._make_abs("active.new"), package.name))

            # Add to the environment contents
            env_contents += "# package: {0}\n".format(package.id)
            for k, v in package.environment.items():
                env_contents += "{0}={1}\n".format(k, v)
            env_contents += "\n"

            # Add to the environment.export contents
            env_export_contents += "# package: {0}\n".format(package.id)
            for k, v in package.environment.items():
                env_export_contents += "export {0}={1}\n".format(k, v)
            env_export_contents += "\n"

            # Add to the buildinfo
            try:
                active_buildinfo_full[package.name] = load_json(os.path.join(package.path, "buildinfo.full.json"))
            except FileNotFoundError:
                # TODO(cmaloney): These only come from setup-packages. Should update
                # setup-packages to add a buildinfo.full for those packages
                active_buildinfo_full[package.name] = None

        # Write out the new environment file.
        new_env = self._make_abs("environment.new")
        write_string(new_env, env_contents)

        # Write out the new environment.export file
        new_env_export = self._make_abs("environment.export.new")
        write_string(new_env_export, env_export_contents)

        # Write out the buildinfo of every active package
        new_buildinfo_meta = self._make_abs("active.buildinfo.full.json.new")
        write_json(new_buildinfo_meta, active_buildinfo_full)

        self.swap_active(".new")
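
A hedged sketch of driving activate(): this mirrors the call pattern used in the build() example further down this page, where a temporary install root is created and a list of already-loaded package objects is activated into it. The positional flags passed to Install are copied from that usage, not documented here.

import tempfile

# Sketch only: activate a set of package objects (e.g. loaded via repository.load())
# into a throwaway install root, mirroring the usage inside build() below.
install_dir = tempfile.mkdtemp(prefix="pkgpanda-")
install = Install(install_dir, None, True, False, True, True)
install.activate(active_packages)
# activate() writes environment, environment.export and active.buildinfo.full.json as
# ".new" files and then swaps them into place via swap_active(".new").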
Exemplo n.º 27
0
def build(variant, package_dir, name, repository_url, clean_after_build):
    print("Building package {} variant {}".format(name, variant or "<default>"))
    tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
    repository = Repository(tmpdir.name)

    def pkg_abs(name):
        return package_dir + '/' + name

    # Build pkginfo over time, translating fields from buildinfo.
    pkginfo = {}

    # Build up the docker command arguments over time, translating fields as needed.
    cmd = DockerCmd()

    buildinfo = load_buildinfo(package_dir, variant)

    if 'name' in buildinfo:
        raise BuildError("'name' is not allowed in buildinfo.json, it is implicitly the name of the "
                         "folder containing the buildinfo.json")

    # Make sure build_script is only set on variants
    if 'build_script' in buildinfo and variant is None:
        raise BuildError("build_script can only be set on package variants")

    # Convert single_source -> sources
    try:
        sources = expand_single_source_alias(name, buildinfo)
    except ValidationError as ex:
        raise BuildError("Invalid buildinfo.json for package: {}".format(ex)) from ex

    # Save the final sources back into buildinfo so it gets written into
    # buildinfo.json. This also means buildinfo.json is always expanded form.
    buildinfo['sources'] = sources

    # Construct the source fetchers, gather the checkout ids from them
    checkout_ids = dict()
    fetchers = dict()
    try:
        for src_name, src_info in sorted(sources.items()):
            if src_info['kind'] not in pkgpanda.build.src_fetchers.all_fetchers:
                raise ValidationError("No known way to catch src with kind '{}'. Known kinds: {}".format(
                    src_info['kind'],
                    pkgpanda.src_fetchers.all_fetchers.keys()))

            cache_dir = pkg_abs("cache")
            if not os.path.exists(cache_dir):
                os.mkdir(cache_dir)

            fetchers[src_name] = pkgpanda.build.src_fetchers.all_fetchers[src_info['kind']](src_name,
                                                                                            src_info,
                                                                                            package_dir)
            checkout_ids[src_name] = fetchers[src_name].get_id()
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    for src_name, checkout_id in checkout_ids.items():
        # NOTE: single_source buildinfo was expanded above so the src_name is
        # always correct here.
        # Make sure we never accidentally overwrite something which might be
        # important. Fields should match if specified (And that should be
        # tested at some point). For now disallowing identical saves hassle.
        assert_no_duplicate_keys(checkout_id, buildinfo['sources'][src_name])
        buildinfo['sources'][src_name].update(checkout_id)

    # Add the sha1sum of the buildinfo.json + build file to the build ids
    build_ids = {"sources": checkout_ids}
    build_ids['build'] = pkgpanda.util.sha1(pkg_abs("build"))
    build_ids['pkgpanda_version'] = pkgpanda.build.constants.version
    build_ids['variant'] = '' if variant is None else variant

    extra_dir = pkg_abs("extra")
    # Add the "extra" folder inside the package as an additional source if it
    # exists
    if os.path.exists(extra_dir):
        extra_id = hash_folder(extra_dir)
        build_ids['extra_source'] = extra_id
        buildinfo['extra_source'] = extra_id

    # Figure out the docker name.
    docker_name = buildinfo.get('docker', 'dcos-builder:latest')
    cmd.container = docker_name

    # Add the id of the docker build environment to the build_ids.
    try:
        docker_id = get_docker_id(docker_name)
    except CalledProcessError:
        # docker pull the container and try again
        check_call(['docker', 'pull', docker_name])
        docker_id = get_docker_id(docker_name)

    build_ids['docker'] = docker_id

    # TODO(cmaloney): The environment variables should be generated during build
    # not live in buildinfo.json.
    build_ids['environment'] = buildinfo.get('environment', {})

    # Packages need directories inside the fake install root (otherwise docker
    # will try making the directories on a readonly filesystem), so build the
    # install root now, and make the package directories in it as we go.
    install_dir = tempfile.mkdtemp(prefix="pkgpanda-")

    active_packages = list()
    active_package_ids = set()
    active_package_variants = dict()
    auto_deps = set()
    # Verify all requires are in the repository.
    if 'requires' in buildinfo:
        # Final package has the same requires as the build.
        pkginfo['requires'] = buildinfo['requires']

        # TODO(cmaloney): Pull generating the full set of requires out into a function.
        to_check = copy.deepcopy(buildinfo['requires'])
        if not isinstance(to_check, list):
            raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
        while to_check:
            requires_info = to_check.pop(0)
            requires_name, requires_variant = expand_require(requires_info)

            if requires_name in active_package_variants:
                # TODO(cmaloney): If one package depends on the <default>
                # variant of a package and 1+ others depends on a non-<default>
                # variant then update the dependency to the non-default variant
                # rather than erroring.
                if requires_variant != active_package_variants[requires_name]:
                    # TODO(cmaloney): Make this contain the chains of
                    # dependencies which contain the conflicting packages.
                    # a -> b -> c -> d {foo}
                    # e {bar} -> d {baz}
                    raise BuildError("Dependncy on multiple variants of the same package {}. "
                                     "variants: {} {}".format(
                                        requires_name,
                                        requires_variant,
                                        active_package_variants[requires_name]))

                # The package {requires_name, variant} is already a dependency;
                # don't process it again / move on to the next.
                continue

            active_package_variants[requires_name] = requires_variant

            # Figure out the last build of the dependency, add that as the
            # fully expanded dependency.
            require_package_dir = os.path.normpath(pkg_abs('../' + requires_name))
            last_build = require_package_dir + '/' + last_build_filename(requires_variant)
            if not os.path.exists(last_build):
                raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
                                 "the dependency".format(requires_name, requires_variant))

            try:
                pkg_id_str = load_string(last_build)
                auto_deps.add(pkg_id_str)
                pkg_buildinfo = load_buildinfo(require_package_dir, requires_variant)
                pkg_requires = pkg_buildinfo.get('requires', list())
                pkg_path = repository.package_path(pkg_id_str)
                pkg_tar = pkg_id_str + '.tar.xz'
                if not os.path.exists(require_package_dir + '/' + pkg_tar):
                    raise BuildError("The build tarball {} refered to by the last_build file of the "
                                     "dependency {} variant {} doesn't exist. Rebuild the dependency.".format(
                                        pkg_tar,
                                        requires_name,
                                        requires_variant))

                active_package_ids.add(pkg_id_str)

                # Mount the package into the docker container.
                cmd.volumes[pkg_path] = "/opt/mesosphere/packages/{}:ro".format(pkg_id_str)
                os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))

                # Add the dependencies of the package to the set which will be
                # activated.
                # TODO(cmaloney): All these 'transitive' dependencies shouldn't
                # be available to the package being built, only what depends on
                # them directly.
                to_check += pkg_requires
            except ValidationError as ex:
                raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
            except PackageError as ex:
                raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex

    # Add requires to the package id, calculate the final package id.
    # NOTE: active_packages isn't fully constructed here since we lazily load
    # packages not already in the repository.
    build_ids['requires'] = list(active_package_ids)
    version_base = hash_checkout(build_ids)
    version = None
    if "version_extra" in buildinfo:
        version = "{0}-{1}".format(buildinfo["version_extra"], version_base)
    else:
        version = version_base
    pkg_id = PackageId.from_parts(name, version)

    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
    buildinfo['build_ids'] = build_ids
    buildinfo['package_version'] = version

    # Save the package name and variant. The variant is used when installing
    # packages to validate dependencies.
    buildinfo['name'] = name
    buildinfo['variant'] = variant

    # If the package is already built, don't do anything.
    pkg_path = pkg_abs("{}.tar.xz".format(pkg_id))

    # Done if it exists locally
    if exists(pkg_path):
        print("Package up to date. Not re-building.")

        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        check_call(["mkdir", "-p", pkg_abs("cache")])
        write_string(pkg_abs(last_build_filename(variant)), str(pkg_id))

        return pkg_path

    # Try downloading.
    if repository_url:
        tmp_filename = pkg_path + '.tmp'
        try:
            # Normalize to no trailing slash for repository_url
            repository_url = repository_url.rstrip('/')
            url = repository_url + '/packages/{0}/{1}.tar.xz'.format(pkg_id.name, str(pkg_id))
            print("Attempting to download", pkg_id, "from", url)
            download(tmp_filename, url, package_dir)
            os.rename(tmp_filename, pkg_path)

            print("Package up to date. Not re-building. Downloaded from repository-url.")
            # TODO(cmaloney): Updating / filling last_build should be moved out of
            # the build function.
            check_call(["mkdir", "-p", pkg_abs("cache")])
            write_string(pkg_abs(last_build_filename(variant)), str(pkg_id))
            return pkg_path
        except FetchError:
            try:
                os.remove(tmp_filename)
            except:
                pass

            # Fall out and do the build since the command errored.
            print("Unable to download from cache. Proceeding to build")

    print("Building package {} with buildinfo: {}".format(
        pkg_id,
        json.dumps(buildinfo, indent=2, sort_keys=True)))

    # Clean out src, result so later steps can use them freely for building.
    clean(package_dir)

    # Only fresh builds are allowed which don't overlap existing artifacts.
    result_dir = pkg_abs("result")
    if exists(result_dir):
        raise BuildError("result folder must not exist. It will be made when the package is "
                         "built. {}".format(result_dir))

    # 'mkpanda add' all implicit dependencies since we actually need to build.
    for dep in auto_deps:
        print("Auto-adding dependency: {}".format(dep))
        # NOTE: Not using the name pkg_id because that overrides the outer one.
        id_obj = PackageId(dep)
        add_to_repository(repository, pkg_abs('../{0}/{1}.tar.xz'.format(id_obj.name, dep)))
        package = repository.load(dep)
        active_packages.append(package)

    # Check out all the sources into their respective 'src/' folders.
    try:
        src_dir = pkg_abs('src')
        if os.path.exists(src_dir):
            raise ValidationError(
                "'src' directory already exists, did you have a previous build? " +
                "Currently all builds must be from scratch. Support should be " +
                "added for re-using a src directory when possible. src={}".format(src_dir))
        os.mkdir(src_dir)
        for src_name, fetcher in sorted(fetchers.items()):
            root = pkg_abs('src/' + src_name)
            os.mkdir(root)

            fetcher.checkout_to(root)
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    # Copy over environment settings
    if 'environment' in buildinfo:
        pkginfo['environment'] = buildinfo['environment']

    # Activate the packages so that we have a proper path, environment
    # variables.
    # TODO(cmaloney): RAII type thing for temporary directory so if we
    # don't get all the way through things will be cleaned up?
    install = Install(install_dir, None, True, False, True, True)
    install.activate(active_packages)
    # Rewrite all the symlinks inside the active path because we will
    # be mounting the folder into a docker container, and the absolute
    # paths to the packages will change.
    # TODO(cmaloney): This isn't very clean, it would be much nicer to
    # just run pkgpanda inside the package.
    rewrite_symlinks(install_dir, repository.path, "/opt/mesosphere/packages/")

    print("Building package in docker")

    # TODO(cmaloney): Run as a specific non-root user, make it possible
    # for non-root to cleanup afterwards.
    # Run the build, prepping the environment as necessary.
    mkdir(pkg_abs("result"))

    # Copy the build info to the resulting tarball
    write_json(pkg_abs("src/buildinfo.full.json"), buildinfo)
    write_json(pkg_abs("result/buildinfo.full.json"), buildinfo)

    write_json(pkg_abs("result/pkginfo.json"), pkginfo)

    # Make the folder for the package we are building. If docker does it, it
    # gets auto-created with root permissions and we can't actually delete it.
    os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))

    # TODO(cmaloney): Disallow writing to well known files and directories?
    # Source we checked out
    cmd.volumes.update({
        # TODO(cmaloney): src should be read only...
        pkg_abs("src"): "/pkg/src:rw",
        # The build script
        pkg_abs(buildinfo.get('build_script', 'build')): "/pkg/build:ro",
        # Getting the result out
        pkg_abs("result"): "/opt/mesosphere/packages/{}:rw".format(pkg_id),
        install_dir: "/opt/mesosphere:ro"
    })

    if os.path.exists(extra_dir):
        cmd.volumes[extra_dir] = "/pkg/extra:ro"

    cmd.environment = {
        "PKG_VERSION": version,
        "PKG_NAME": name,
        "PKG_ID": pkg_id,
        "PKG_PATH": "/opt/mesosphere/packages/{}".format(pkg_id),
        "PKG_VARIANT": variant if variant is not None else "<default>"
    }

    try:
        # TODO(cmaloney): Run a wrapper which sources
        # /opt/mesosphere/environment then runs a build. Also should fix
        # ownership of /opt/mesosphere/packages/{pkg_id} post build.
        cmd.run([
            "/bin/bash",
            "-o", "nounset",
            "-o", "pipefail",
            "-o", "errexit",
            "/pkg/build"])
    except CalledProcessError as ex:
        raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))

    # Clean up the temporary install dir used for dependencies.
    # TODO(cmaloney): Move to an RAII wrapper.
    check_call(['rm', '-rf', install_dir])

    print("Building package tarball")

    # Check for forbidden services before packaging the tarball:
    try:
        check_forbidden_services(pkg_abs("result"), RESERVED_UNIT_NAMES)
    except ValidationError as ex:
        raise BuildError("Package validation failed: {}".format(ex))

    # TODO(cmaloney): Updating / filling last_build should be moved out of
    # the build function.
    check_call(["mkdir", "-p", pkg_abs("cache")])
    write_string(pkg_abs(last_build_filename(variant)), str(pkg_id))

    # Bundle the artifacts into the pkgpanda package
    tmp_name = pkg_path + "-tmp.tar.xz"
    make_tar(tmp_name, pkg_abs("result"))
    os.rename(tmp_name, pkg_path)
    print("Package built.")
    if clean_after_build:
        clean(package_dir)
    return pkg_path
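
A hedged usage sketch of build(); the package directory, name, and repository URL below are placeholders, not values from the source.

# Hypothetical invocation: build the <default> variant of a package whose
# buildinfo.json and build script live under packages/mesos/.
pkg_path = build(
    variant=None,
    package_dir='packages/mesos',
    name='mesos',
    repository_url='https://downloads.dcos.io/dcos/stable',
    clean_after_build=True)
print("Package tarball:", pkg_path)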
Exemplo n.º 28
0
Arquivo: config.py Projeto: dcos/dcos
def make_default_config_if_needed(config_path):
    if os.path.exists(config_path):
        return

    write_string(config_path, config_sample)
Exemplo n.º 29
0
Arquivo: config.py Projeto: dcos/dcos
    def write_config(self):
        assert self.config_path is not None

        write_string(self.config_path, self.get_yaml_str())
Exemplo n.º 30
0
def integration_test(
        tunnel, test_dir,
        dcos_dns, master_list, agent_list, public_agent_list,
        provider,
        test_dns_search=True,
        aws_access_key_id='', aws_secret_access_key='', region='', add_env=None,
        pytest_cmd='py.test -rs -vv'):
    """Runs integration test on host

    Args:
        test_dir: directory on the test host in which to place test_preflight.sh and test_wrapper.sh
        dcos_dns: string representing IP of DCOS DNS host
        master_list: list of master addresses
        agent_list: list of agent addresses
        test_dns_search: if set to True, test for deployed mesos DNS app
        provider: (str) either onprem, aws, or azure
    Optional args:
        aws_access_key_id: needed for REXRAY tests
        aws_secret_access_key: needed for REXRAY tests
        region: string indicating AWS region in which cluster is running
        add_env: a python dict with any number of key=value assignments to be passed to
            the test environment
        pytest_cmd: string representing command for py.test

    Returns:
        exit code from running pytest_cmd

    """
    dns_search = 'true' if test_dns_search else 'false'
    test_env = [
        'DCOS_DNS_ADDRESS=http://' + dcos_dns,
        'MASTER_HOSTS=' + ','.join(master_list),
        'PUBLIC_MASTER_HOSTS=' + ','.join(master_list),
        'SLAVE_HOSTS=' + ','.join(agent_list),
        'PUBLIC_SLAVE_HOSTS=' + ','.join(public_agent_list),
        'DCOS_PROVIDER=' + provider,
        'DNS_SEARCH=' + dns_search,
        'AWS_ACCESS_KEY_ID=' + aws_access_key_id,
        'AWS_SECRET_ACCESS_KEY=' + aws_secret_access_key,
        'AWS_REGION=' + region]
    if add_env:
        for key, value in add_env.items():
            extra_env = key + '=' + value
            test_env.append(extra_env)

    test_env_str = ''.join(['export ' + e + '\n' for e in test_env])

    test_boilerplate = """#!/bin/bash
{env}
cd /opt/mesosphere/active/dcos-integration-test
/opt/mesosphere/bin/dcos-shell {cmd}
"""

    write_string('test_preflight.sh', test_boilerplate.format(
        env=test_env_str, cmd='py.test -rs -vv --collect-only'))
    write_string('test_wrapper.sh', test_boilerplate.format(
        env=test_env_str, cmd=pytest_cmd))

    pretest_path = join(test_dir, 'test_preflight.sh')
    log.info('Running integration test setup check...')
    tunnel.write_to_remote('test_preflight.sh', pretest_path)
    tunnel.remote_cmd(['bash', pretest_path], stdout=sys.stdout.buffer)

    wrapper_path = join(test_dir, 'test_wrapper.sh')
    log.info('Running integration test...')
    tunnel.write_to_remote('test_wrapper.sh', wrapper_path)
    try:
        tunnel.remote_cmd(['bash', wrapper_path], stdout=sys.stdout.buffer)
    except CalledProcessError as e:
        return e.returncode
    return 0
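
A hedged usage sketch of this dcos-shell based variant; the tunnel, directory, and addresses are placeholders, and any py.test invocation can be passed as pytest_cmd.

# Hypothetical invocation. `tunnel` is assumed to expose write_to_remote() and
# remote_cmd() as used above; the return value mirrors the remote py.test exit code.
exit_code = integration_test(
    tunnel=tunnel,
    test_dir='/home/centos',
    dcos_dns='10.0.0.10',
    master_list=['10.0.0.10'],
    agent_list=['10.0.0.11'],
    public_agent_list=['10.0.0.12'],
    provider='onprem',
    pytest_cmd='py.test -rs -vv -x')
if exit_code != 0:
    raise SystemExit(exit_code)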
Exemplo n.º 31
0
    def run_test(self) -> int:
        stack_name = 'dcos-ci-test-upgrade-' + random_id(10)

        test_id = uuid.uuid4().hex
        healthcheck_app_id = TEST_APP_NAME_FMT.format('healthcheck-' + test_id)
        dns_app_id = TEST_APP_NAME_FMT.format('dns-' + test_id)

        with logger.scope("create vpc cf stack '{}'".format(stack_name)):
            bw = test_util.aws.BotoWrapper(
                region=self.aws_region,
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_secret_access_key)
            ssh_key = bw.create_key_pair(stack_name)
            write_string('ssh_key', ssh_key)
            vpc, ssh_info = test_util.aws.VpcCfStack.create(
                stack_name=stack_name,
                instance_type='m4.xlarge',
                instance_os='cent-os-7-dcos-prereqs',
                # An instance for each cluster node plus the bootstrap.
                instance_count=(self.num_masters + self.num_agents + self.num_public_agents + 1),
                admin_location='0.0.0.0/0',
                key_pair_name=stack_name,
                boto_wrapper=bw
            )
            vpc.wait_for_complete()

        cluster = test_util.cluster.Cluster.from_vpc(
            vpc,
            ssh_info,
            ssh_key=ssh_key,
            num_masters=self.num_masters,
            num_agents=self.num_agents,
            num_public_agents=self.num_public_agents,
        )

        with logger.scope("install dcos"):
            # Use the CLI installer to set exhibitor_storage_backend = zookeeper.
            test_util.cluster.install_dcos(cluster, self.stable_installer_url, api=False,
                                           add_config_path=self.config_yaml_override_install)

            master_list = [h.private_ip for h in cluster.masters]

            dcos_api_install = self.dcos_api_session_factory_install.apply(
                'http://{ip}'.format(ip=cluster.masters[0].public_ip),
                master_list,
                master_list,
                [h.private_ip for h in cluster.agents],
                [h.private_ip for h in cluster.public_agents],
                self.default_os_user)

            dcos_api_install.wait_for_dcos()

        installed_version = dcos_api_install.get_version()
        healthcheck_app = create_marathon_healthcheck_app(healthcheck_app_id)
        dns_app = create_marathon_dns_app(dns_app_id, healthcheck_app_id)

        self.setup_cluster_workload(dcos_api_install, healthcheck_app, dns_app)

        with logger.scope("upgrade cluster"):
            test_util.cluster.upgrade_dcos(cluster, self.installer_url,
                                           installed_version, add_config_path=self.config_yaml_override_upgrade)
            with cluster.ssher.tunnel(cluster.bootstrap_host) as bootstrap_host_tunnel:
                bootstrap_host_tunnel.remote_cmd(['sudo', 'rm', '-rf', cluster.ssher.home_dir + '/*'])

        # This invocation takes the same arguments as the one above, but the cluster it
        # targets is at a different point in its lifecycle: this client is created for the
        # cluster after the upgrade, so it can account for any settings that may have
        # changed under the hood when it probes the upgraded cluster.
        dcos_api_upgrade = self.dcos_api_session_factory_upgrade.apply(
            'http://{ip}'.format(ip=cluster.masters[0].public_ip),
            master_list,
            master_list,
            [h.private_ip for h in cluster.agents],
            [h.private_ip for h in cluster.public_agents],
            self.default_os_user)

        dcos_api_upgrade.wait_for_dcos()  # here we wait for DC/OS to be "up" so that we can auth this new client

        self.verify_apps_state(dcos_api_upgrade, dns_app)

        with logger.scope("run integration tests"):
            # copied from test_util/test_aws_cf.py:96
            add_env = []
            prefix = 'TEST_ADD_ENV_'
            for k, v in os.environ.items():
                if k.startswith(prefix):
                    add_env.append(k.replace(prefix, '') + '=' + v)
            test_cmd = ' '.join(add_env) + ' py.test -vv -s -rs ' + os.getenv('CI_FLAGS', '')
            result = test_util.cluster.run_integration_tests(cluster, test_cmd=test_cmd)

        if result == 0:
            self.log.info("Test successful! Deleting VPC if provided in this run.")
            vpc.delete()
            bw.delete_key_pair(stack_name)
        else:
            self.log.info("Test failed! VPC cluster will remain available for "
                          "debugging for 2 hour after instantiation.")

        return result
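
The TEST_ADD_ENV_ handling above forwards selected environment variables into the remote test command; a small self-contained sketch of that convention (the variable name and value are made up for illustration):

import os

# Any variable named TEST_ADD_ENV_<KEY> in the CI environment is forwarded to the
# remote test run as a <KEY>=<value> prefix on the py.test command line.
os.environ['TEST_ADD_ENV_DCOS_LOGIN_UNAME'] = 'admin'   # hypothetical example

prefix = 'TEST_ADD_ENV_'
add_env = [k[len(prefix):] + '=' + v for k, v in os.environ.items() if k.startswith(prefix)]
test_cmd = ' '.join(add_env) + ' py.test -vv -s -rs ' + os.getenv('CI_FLAGS', '')
print(test_cmd)  # e.g. "DCOS_LOGIN_UNAME=admin py.test -vv -s -rs "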
Exemplo n.º 32
0
def generate(
        arguments,
        extra_templates=list(),
        extra_sources=list(),
        extra_targets=list()):
    # To maintain the old API we still accept `arguments`, but use the new name
    # user_arguments internally.
    user_arguments = arguments
    arguments = None

    sources, targets, templates = get_dcosconfig_source_target_and_templates(
        user_arguments, extra_templates, extra_sources)

    resolver = validate_and_raise(sources, targets + extra_targets)
    argument_dict = get_final_arguments(resolver)
    late_variables = get_late_variables(resolver, sources)
    secret_builtins = ['expanded_config_full', 'user_arguments_full', 'config_yaml_full']
    secret_variables = set(get_secret_variables(sources) + secret_builtins)
    masked_value = '**HIDDEN**'

    # Calculate values for builtin variables.
    user_arguments_masked = {k: (masked_value if k in secret_variables else v) for k, v in user_arguments.items()}
    argument_dict['user_arguments_full'] = json_prettyprint(user_arguments)
    argument_dict['user_arguments'] = json_prettyprint(user_arguments_masked)
    argument_dict['config_yaml_full'] = user_arguments_to_yaml(user_arguments)
    argument_dict['config_yaml'] = user_arguments_to_yaml(user_arguments_masked)

    # The expanded_config and expanded_config_full variables contain all other variables and their values.
    # expanded_config is a copy of expanded_config_full with secret values removed. Calculating these variables' values
    # must come after the calculation of all other variables to prevent infinite recursion.
    # TODO(cmaloney): Make this late-bound by gen.internals
    expanded_config_full = {
        k: v for k, v in argument_dict.items()
        # Omit late-bound variables whose values have not yet been calculated.
        if not v.startswith(gen.internals.LATE_BIND_PLACEHOLDER_START)
    }
    expanded_config_scrubbed = {k: v for k, v in expanded_config_full.items() if k not in secret_variables}
    argument_dict['expanded_config_full'] = format_expanded_config(expanded_config_full)
    argument_dict['expanded_config'] = format_expanded_config(expanded_config_scrubbed)

    log.debug(
        "Final arguments:" + json_prettyprint({
            # Mask secret config values.
            k: (masked_value if k in secret_variables else v) for k, v in argument_dict.items()
        })
    )

    # Fill in the template parameters
    # TODO(cmaloney): render_templates should ideally take the template targets.
    rendered_templates = render_templates(templates, argument_dict)

    # Validate there aren't any unexpected top level directives in any of the files
    # (likely indicates a misspelling)
    for name, template in rendered_templates.items():
        if name == 'dcos-services.yaml':  # yaml list of the service files
            assert isinstance(template, list)
        elif name == 'cloud-config.yaml':
            assert template.keys() <= CLOUDCONFIG_KEYS, template.keys()
        elif isinstance(template, str):  # Not a yaml template
            pass
        else:  # yaml template file
            log.debug("validating template file %s", name)
            assert template.keys() <= PACKAGE_KEYS, template.keys()

    stable_artifacts = []
    channel_artifacts = []

    # Find all files which contain late bind variables and turn them into a "late bind package"
    # TODO(cmaloney): check there are no late bound variables in cloud-config.yaml
    late_files, regular_files = extract_files_containing_late_variables(
        rendered_templates['dcos-config.yaml']['package'])
    # put the regular files right back
    rendered_templates['dcos-config.yaml'] = {'package': regular_files}

    # Render cluster package list artifact.
    cluster_package_list_filename = 'package_lists/{}.package_list.json'.format(
        argument_dict['cluster_package_list_id']
    )
    os.makedirs(os.path.dirname(cluster_package_list_filename), mode=0o755, exist_ok=True)
    write_string(cluster_package_list_filename, argument_dict['cluster_packages'])
    log.info('Cluster package list: {}'.format(cluster_package_list_filename))
    stable_artifacts.append(cluster_package_list_filename)

    def make_package_filename(package_id, extension):
        return 'packages/{0}/{1}{2}'.format(
            package_id.name,
            repr(package_id),
            extension)

    # Render all the cluster packages
    cluster_package_info = {}

    # Prepare late binding config, if any.
    late_package = build_late_package(late_files, argument_dict['config_id'], argument_dict['provider'])
    if late_variables:
        # Render the late binding package. This package will be downloaded onto
        # each cluster node during bootstrap and rendered into the final config
        # using the values from the late config file.
        late_package_id = PackageId(late_package['name'])
        late_package_filename = make_package_filename(late_package_id, '.dcos_config')
        os.makedirs(os.path.dirname(late_package_filename), mode=0o755)
        write_yaml(late_package_filename, {'package': late_package['package']}, default_flow_style=False)
        log.info('Package filename: {}'.format(late_package_filename))
        stable_artifacts.append(late_package_filename)

        # Add the late config file to cloud config. The expressions in
        # late_variables will be resolved by the service handling the cloud
        # config (e.g. Amazon CloudFormation). The rendered late config file
        # on a cluster node's filesystem will contain the final values.
        rendered_templates['cloud-config.yaml']['root'].append({
            'path': '/etc/mesosphere/setup-flags/late-config.yaml',
            'permissions': '0644',
            'owner': 'root',
            # TODO(cmaloney): don't prettyprint to save bytes.
            # NOTE: Use yaml here simply to make avoiding painful escaping and
            # unescaping easier.
            'content': render_yaml({
                'late_bound_package_id': late_package['name'],
                'bound_values': late_variables
            })})

    # Collect metadata for cluster packages.
    for package_id_str in json.loads(argument_dict['cluster_packages']):
        package_id = PackageId(package_id_str)
        package_filename = make_package_filename(package_id, '.tar.xz')

        cluster_package_info[package_id.name] = {
            'id': package_id_str,
            'filename': package_filename
        }

    # Render config packages.
    config_package_ids = json.loads(argument_dict['config_package_ids'])
    for package_id_str in config_package_ids:
        package_id = PackageId(package_id_str)
        package_filename = cluster_package_info[package_id.name]['filename']
        do_gen_package(rendered_templates[package_id.name + '.yaml'], package_filename)
        stable_artifacts.append(package_filename)

    # Convert cloud-config to just contain write_files rather than root
    cc = rendered_templates['cloud-config.yaml']

    # Shouldn't contain any packages. Providers should pull what they need to
    # late bind out of other packages via cc_package_file.
    assert 'package' not in cc
    cc_root = cc.pop('root', [])
    # Make sure write_files exists.
    assert 'write_files' not in cc
    cc['write_files'] = []
    # Do the transform
    for item in cc_root:
        assert item['path'].startswith('/')
        cc['write_files'].append(item)
    rendered_templates['cloud-config.yaml'] = cc

    # Add utils that need to be defined here so they can be bound to locals.
    def add_services(cloudconfig, cloud_init_implementation):
        return add_units(cloudconfig, rendered_templates['dcos-services.yaml'], cloud_init_implementation)

    utils.add_services = add_services

    def add_stable_artifact(filename):
        assert filename not in stable_artifacts + channel_artifacts
        stable_artifacts.append(filename)

    utils.add_stable_artifact = add_stable_artifact

    def add_channel_artifact(filename):
        assert filename not in stable_artifacts + channel_artifacts
        channel_artifacts.append(filename)

    utils.add_channel_artifact = add_channel_artifact

    return Bunch({
        'arguments': argument_dict,
        'cluster_packages': cluster_package_info,
        'stable_artifacts': stable_artifacts,
        'channel_artifacts': channel_artifacts,
        'templates': rendered_templates,
        'utils': utils
    })
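
A hedged sketch of consuming generate()'s return value; the input dict below is a heavily truncated placeholder, and a real configuration requires many more keys.

# Hypothetical, truncated input; real DC/OS configurations carry many more arguments.
gen_out = generate(arguments={
    'cluster_name': 'example',
    'bootstrap_url': 'http://bootstrap.example.com',
})

# The returned Bunch exposes the rendered data as attributes.
for pkg_name, info in gen_out.cluster_packages.items():
    print(pkg_name, info['id'], info['filename'])
print('stable artifacts:', gen_out.stable_artifacts)
print('channel artifacts:', gen_out.channel_artifacts)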