Code Example #1
File: __init__.py  Project: brndnmtthws/dcos
 def get_last_bootstrap(variant):
     bootstrap_latest = self._packages_dir + '/' + pkgpanda.util.variant_prefix(variant) + 'bootstrap.latest'
     if not os.path.exists(bootstrap_latest):
         raise BuildError("No last bootstrap found for variant {}. Expected to find {} to match "
                          "{}".format(pkgpanda.util.variant_name(variant), bootstrap_latest,
                                      pkgpanda.util.variant_prefix(variant) + 'treeinfo.json'))
     return load_string(bootstrap_latest)
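
Every snippet on this page calls load_string. A minimal sketch of such a helper, assuming it just reads a small text file and strips surrounding whitespace (the real pkgpanda.util.load_string may differ in detail):

def load_string(filename):
    # Read a small text file (e.g. a bootstrap id file) and return its
    # contents without the surrounding whitespace / trailing newline.
    with open(filename, encoding='utf-8') as f:
        return f.read().strip()
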
Code Example #2
 def get_last_bootstrap(variant):
     bootstrap_latest = path + '/' + pkgpanda.util.variant_prefix(variant) + 'bootstrap.latest'
     if not os.path.exists(bootstrap_latest):
         raise BuildError("No last bootstrap found for variant {}. Expected to find {} to match "
                          "{}".format(pkgpanda.util.variant_name(variant), bootstrap_latest,
                                      pkgpanda.util.variant_prefix(variant) + 'treeinfo.json'))
     return load_string(bootstrap_latest)
Code Example #3
File: test_upgrade_vpc.py  Project: Gilbert88/dcos
def main():
    num_masters = int(os.getenv('MASTERS', '3'))
    num_agents = int(os.getenv('AGENTS', '2'))
    num_public_agents = int(os.getenv('PUBLIC_AGENTS', '1'))

    stable_installer_url = os.environ['STABLE_INSTALLER_URL']
    installer_url = os.environ['INSTALLER_URL']
    aws_region = os.getenv('DEFAULT_AWS_REGION', 'eu-central-1')
    aws_access_key_id = os.getenv('AWS_ACCESS_KEY_ID')
    aws_secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY')
    ssh_key = load_string(os.getenv('DCOS_SSH_KEY_PATH', 'default_ssh_key'))

    config_yaml_override_install = os.getenv('CONFIG_YAML_OVERRIDE_INSTALL')
    config_yaml_override_upgrade = os.getenv('CONFIG_YAML_OVERRIDE_UPGRADE')

    dcos_api_session_factory = DcosApiSessionFactory()
    test = VpcClusterUpgradeTest(num_masters, num_agents, num_public_agents,
                                 stable_installer_url, installer_url,
                                 aws_region, aws_access_key_id, aws_secret_access_key,
                                 ssh_key, "root",
                                 config_yaml_override_install, config_yaml_override_upgrade,
                                 dcos_api_session_factory, dcos_api_session_factory)
    status = test.run_test()

    sys.exit(status)
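
The test above is configured entirely through environment variables. A hypothetical invocation, using placeholder URLs and paths rather than real values:

import os

# Placeholder values for illustration only.
os.environ['STABLE_INSTALLER_URL'] = 'https://example.com/stable/dcos_generate_config.sh'
os.environ['INSTALLER_URL'] = 'https://example.com/testing/dcos_generate_config.sh'
os.environ['DCOS_SSH_KEY_PATH'] = '/home/ci/.ssh/id_rsa'
main()  # MASTERS/AGENTS/PUBLIC_AGENTS fall back to 3/2/1 when unset
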
Code Example #4
File: __init__.py  Project: yonglehou/dcos
def get_last_bootstrap_set(path):
    assert path[-1] != '/'
    last_bootstrap = {}

    # Get all the tree variants. If there is a treeinfo.json for the default
    # variant this won't catch it, because that file would be named just
    # 'treeinfo.json', with no '.'-prefixed variant before it.
    for filename in os.listdir(path):
        if filename.endswith('.treeinfo.json'):
            variant_name = filename[:-len('.treeinfo.json')]
            bootstrap_id = load_string(path + '/' + variant_name + '.bootstrap.latest')
            last_bootstrap[variant_name] = bootstrap_id

    # Add in None / the default variant with a Python None.
    # Use a Python None so that handling it incorrectly around strings will
    # result in more visible errors than an empty string would.
    last_bootstrap[None] = load_string(path + '/' + 'bootstrap.latest')

    return last_bootstrap
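
Given a channel directory laid out as the comments describe, the returned dict maps variant names to bootstrap ids, with the default variant under the key None. A hypothetical call (the path and ids are made up):

last = get_last_bootstrap_set('/artifacts/channel')
# => {None: '59f9246e...', 'installer': '0b3a7dce...'}
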
Code Example #5
def main():
    options = check_environment()
    aw = AzureWrapper(options.location, options.subscription_id,
                      options.client_id, options.client_secret,
                      options.tenant_id)
    dcos_resource_group = DcosAzureResourceGroup.deploy_acs_template(
        azure_wrapper=aw,
        template_url=options.template_url,
        group_name=options.name,
        public_key=options.public_ssh_key,
        master_prefix=options.master_prefix,
        agent_prefix=options.agent_prefix,
        admin_name=options.linux_user,
        oauth_enabled=options.oauth_enabled,
        vm_size=options.vm_size,
        agent_count=options.num_agents,
        name_suffix=options.name_suffix,
        vm_diagnostics_enabled=options.vm_diagnostics_enabled)
    result = 1
    dcos_resource_group.wait_for_deployment()
    dcos_dns = dcos_resource_group.public_master_lb_fqdn
    master_list = [
        ip.private_ip for ip in dcos_resource_group.get_master_ips()
    ]
    with tunnel(options.linux_user,
                load_string(options.ssh_key_path),
                dcos_dns,
                port=2200) as t:
        result = integration_test(
            tunnel=t,
            dcos_dns=master_list[0],
            master_list=master_list,
            agent_list=[
                ip.private_ip
                for ip in dcos_resource_group.get_private_agent_ips()
            ],
            public_agent_list=[
                ip.private_ip
                for ip in dcos_resource_group.get_public_agent_ips()
            ],
            test_cmd=options.test_cmd)
    if result == 0:
        log.info('Test successful! Deleting Azure resource group')
        dcos_resource_group.delete()
    else:
        logging.warning(
            'Test exited with an error; Resource group preserved for troubleshooting. '
            'See https://github.com/mesosphere/cloudcleaner project for cleanup policies'
        )
    if options.ci_flags:
        result = 0  # Wipe the return code so that tests can be muted in CI
    sys.exit(result)
Code Example #6
File: __init__.py  Project: bernadinm/dcos
def load_templates(template_dict):
    result = dict()
    for name, template_list in template_dict.items():
        result_list = list()
        for template_name in template_list:
            result_list.append(gen.template.parse_resources(template_name))

            extra_filename = "gen_extra/" + template_name
            if os.path.exists(extra_filename):
                result_list.append(gen.template.parse_str(
                    load_string(extra_filename)))
        result[name] = result_list
    return result
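
The argument maps output names to lists of template resource names; for each template, a matching file under gen_extra/ is parsed and appended after the base template if it exists. A hypothetical call:

loaded = load_templates({'cloud-config.yaml': ['cloud-config.yaml']})
# loaded['cloud-config.yaml'] is a list of parsed templates, with any
# gen_extra/cloud-config.yaml override appended after the base one.
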
Code Example #7
File: __init__.py  Project: sriram-ranga/dcos
def load_templates(template_dict):
    result = dict()
    for name, template_list in template_dict.items():
        result_list = list()
        for template_name in template_list:
            result_list.append(gen.template.parse_resources(template_name))

            extra_filename = "gen_extra/" + template_name
            if os.path.exists(extra_filename):
                result_list.append(
                    gen.template.parse_str(load_string(extra_filename)))
        result[name] = result_list
    return result
Code Example #8
File: __init__.py  Project: Radek44/dcos
 def get_last_bootstrap(variant):
     bootstrap_latest = (
         self.get_bootstrap_cache_dir() + "/" + pkgpanda.util.variant_prefix(variant) + "bootstrap.latest"
     )
     if not os.path.exists(bootstrap_latest):
         raise BuildError(
             "No last bootstrap found for variant {}. Expected to find {} to match "
             "{}".format(
                 pkgpanda.util.variant_name(variant),
                 bootstrap_latest,
                 pkgpanda.util.variant_prefix(variant) + "treeinfo.json",
             )
         )
     return load_string(bootstrap_latest)
Code Example #9
File: exhibitor.py  Project: zhous1q/dcos
def try_shortcut():
    try:
        # pid stat file exists, read the value out of it
        stashed_pid_stat = int(load_string(stash_zk_pid_stat_mtime_path))
    except FileNotFoundError:
        log.info('No zk.pid last mtime found at %s',
                 stash_zk_pid_stat_mtime_path)
        return False

    # Make sure that the pid hasn't been written anew
    cur_pid_stat = get_zk_pid_mtime()

    if cur_pid_stat is None:
        return False

    if stashed_pid_stat != cur_pid_stat:
        return False

    # Check that the PID has a zk running at it currently.
    zk_pid = get_zk_pid()
    cmdline_path = '/proc/{}/cmdline'.format(zk_pid)
    try:
        # Custom because the command line is ascii with `\x00` as separator.
        with open(cmdline_path, 'rb') as f:
            cmd_line = f.read().split(b'\x00')[:-1]
    except FileNotFoundError:
        log.info(
            'Process no longer running (couldn\'t read the cmdline at: %s)',
            zk_pid)
        return False

    log.info('PID %s has command line %s', zk_pid, cmd_line)

    if len(cmd_line) < 3:
        log.info("Command line too short to be zookeeper started by exhibitor")
        return False

    if cmd_line[-1] != b'/var/lib/dcos/exhibitor/conf/zoo.cfg' \
            or cmd_line[0] != b'/opt/mesosphere/active/java/usr/java/bin/java':
        log.info(
            "command line doesn't start with java and end with zookeeper.cfg")
        return False

    log.info(
        "PID file hasn't been modified. ZK still seems to be at that PID.")
    return True
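
try_shortcut compares a previously stashed zk.pid mtime with the current one, but the code that writes the stash is not shown here. Based on the names used above, the companion step might look like this (an assumption, not the project's actual code):

# Stash the current zk.pid mtime so a later run of try_shortcut can
# detect whether ZooKeeper has been restarted since.
pid_stat = get_zk_pid_mtime()
if pid_stat is not None:
    write_string(stash_zk_pid_stat_mtime_path, str(pid_stat))
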
Code Example #10
File: config.py  Project: tamarrow/dcos
def calculate_dcos_config_contents(dcos_config, num_masters, ssh_user, ssh_private_key, platform):
    """ Fills in the ssh user, ssh private key and ip-detect script if possible
    Takes the config's local references and converts them into transmittable content
    """
    user_config = yaml.load(dcos_config)
    # Use the default config in the installer for the same experience
    # w.r.t the auto-filled settings
    config = yaml.load(dcos_installer.config.config_sample)
    config.update(user_config)
    config['ssh_user'] = ssh_user
    config['ssh_key'] = ssh_private_key
    for key_name in ['ip_detect_filename', 'ip_detect_public_filename']:
        if key_name in config:
            config[key_name.replace('_filename', '_contents')] = load_string(config[key_name])
            del config[key_name]
    if 'ip_detect_contents' not in config:
        config['ip_detect_contents'] = test_util.helpers.ip_detect_script(platform)
    return yaml.dump(config)
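
The key transformation is rewriting each *_filename entry into a *_contents entry via load_string. A hypothetical call (the path is a placeholder):

out = calculate_dcos_config_contents(
    'ip_detect_filename: /tmp/ip-detect.sh', 1, 'centos', '<private key>', 'aws')
# The resulting YAML has no ip_detect_filename key; ip_detect_contents
# now holds the script text that load_string read from disk.
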
Code Example #11
File: test_azure.py  Project: tamarrow/dcos
def main():
    options = check_environment()
    aw = AzureWrapper(
        options.location,
        options.subscription_id,
        options.client_id,
        options.client_secret,
        options.tenant_id)
    dcos_resource_group = DcosAzureResourceGroup.deploy_acs_template(
        azure_wrapper=aw,
        template_url=options.template_url,
        group_name=options.name,
        public_key=options.public_ssh_key,
        master_prefix=options.master_prefix,
        agent_prefix=options.agent_prefix,
        admin_name=options.linux_user,
        oauth_enabled=options.oauth_enabled,
        vm_size=options.vm_size,
        agent_count=options.num_agents,
        name_suffix=options.name_suffix,
        vm_diagnostics_enabled=options.vm_diagnostics_enabled)
    result = 1
    dcos_resource_group.wait_for_deployment()
    dcos_dns = dcos_resource_group.public_master_lb_fqdn
    master_list = [ip.private_ip for ip in dcos_resource_group.get_master_ips()]
    with tunnel(options.linux_user, load_string(options.ssh_key_path),
                dcos_dns, port=2200) as t:
        result = integration_test(
            tunnel=t,
            dcos_dns=dcos_dns,
            master_list=master_list,
            agent_list=[ip.private_ip for ip in dcos_resource_group.get_private_agent_ips()],
            public_agent_list=[ip.private_ip for ip in dcos_resource_group.get_public_agent_ips()],
            test_cmd=options.test_cmd)
    if result == 0:
        log.info('Test successful! Deleting Azure resource group')
        dcos_resource_group.delete()
    else:
        logging.warning('Test exited with an error; Resource group preserved for troubleshooting. '
                        'See https://github.com/mesosphere/cloudcleaner project for cleanup policies')
    if options.ci_flags:
        result = 0  # Wipe the return code so that tests can be muted in CI
    sys.exit(result)
Code Example #12
def main():
    options = check_environment()
    aw = AzureWrapper(options.location, options.subscription_id,
                      options.client_id, options.client_secret,
                      options.tenant_id)
    dcos_resource_group = DcosAzureResourceGroup.deploy_acs_template(
        azure_wrapper=aw,
        template_url=options.template_url,
        group_name=options.name,
        public_key=options.public_ssh_key,
        master_prefix=options.master_prefix,
        agent_prefix=options.agent_prefix,
        admin_name=options.linux_user,
        oauth_enabled=options.oauth_enabled,
        vm_size=options.vm_size,
        agent_count=options.num_agents,
        name_suffix=options.name_suffix,
        vm_diagnostics_enabled=options.vm_diagnostics_enabled)
    result = 1
    with ExitStack() as stack:
        if options.azure_cleanup:
            stack.push(dcos_resource_group)
        dcos_resource_group.wait_for_deployment()
        t = stack.enter_context(
            tunnel(options.linux_user,
                   load_string(options.ssh_key_path),
                   dcos_resource_group.outputs['masterFQDN'],
                   port=2200))
        result = integration_test(
            tunnel=t,
            dcos_dns=dcos_resource_group.get_master_ips()[0],
            master_list=dcos_resource_group.get_master_ips(),
            agent_list=dcos_resource_group.get_private_ips(),
            public_agent_list=dcos_resource_group.get_public_ips(),
            test_cmd=options.test_cmd)
    if result == 0:
        log.info('Test successful!')
    else:
        logging.warning('Test exited with an error')
    if options.ci_flags:
        result = 0  # Wipe the return code so that tests can be muted in CI
    sys.exit(result)
Code Example #13
File: test_aws_cf.py  Project: cmaloney/dcos
def main():
    options = check_environment()
    cf, ssh_info = provide_cluster(options)
    cluster = test_util.cluster.Cluster.from_cloudformation(cf, ssh_info, load_string(options.ssh_key_path))

    result = test_util.cluster.run_integration_tests(
        cluster,
        region=options.aws_region,
        aws_access_key_id=options.aws_access_key_id,
        aws_secret_access_key=options.aws_secret_access_key,
        test_cmd=options.test_cmd,
    )
    if result == 0:
        log.info('Test successful! Deleting CloudFormation.')
        cf.delete()
    else:
        logging.warning('Test exited with an error')
    if options.ci_flags:
        result = 0  # Wipe the return code so that tests can be muted in CI
    sys.exit(result)
Code Example #14
File: exhibitor.py  Project: branden/dcos
def try_shortcut():
    try:
        # pid stat file exists, read the value out of it
        stashed_pid_stat = int(load_string(stash_zk_pid_stat_mtime_path))
    except FileNotFoundError:
        log.info('No zk.pid last mtime found at %s', stash_zk_pid_stat_mtime_path)
        return False

    # Make sure that the pid hasn't been written anew
    cur_pid_stat = get_zk_pid_mtime()

    if cur_pid_stat is None:
        return False

    if stashed_pid_stat != cur_pid_stat:
        return False

    # Check that the PID has a zk running at it currently.
    zk_pid = get_zk_pid()
    cmdline_path = '/proc/{}/cmdline'.format(zk_pid)
    try:
        # Custom because the command line is ascii with `\0` as separator.
        with open(cmdline_path, 'rb') as f:
            cmd_line = f.read().split(b'\0')[:-1]
    except FileNotFoundError:
        log.info('Process no longer running (couldn\'t read the cmdline at: %s)', zk_pid)
        return False

    log.info('PID %s has command line %s', zk_pid, cmd_line)

    if len(cmd_line) < 3:
        log.info("Command line too short to be zookeeper started by exhibitor")
        return False

    if cmd_line[-1] != b'/var/lib/dcos/exhibitor/conf/zoo.cfg' \
            or cmd_line[0] != b'/opt/mesosphere/active/java/usr/java/bin/java':
        log.info("command line doesn't start with java and end with zookeeper.cfg")
        return False

    log.info("PID file hasn't been modified. ZK still seems to be at that PID.")
    return True
Code Example #15
File: config.py  Project: warrenween/dcos
def calculate_dcos_config_contents(dcos_config, num_masters, ssh_user,
                                   ssh_private_key, platform):
    """ Fills in the ssh user, ssh private key and ip-detect script if possible
    Takes the config's local references and converts them into transmittable content
    """
    user_config = yaml.load(dcos_config)
    # Use the default config in the installer for the same experience
    # w.r.t the auto-filled settings
    config = yaml.load(dcos_installer.config.config_sample)
    config.update(user_config)
    config['ssh_user'] = ssh_user
    config['ssh_key'] = ssh_private_key
    for key_name in ['ip_detect_filename', 'ip_detect_public_filename']:
        if key_name in config:
            config[key_name.replace('_filename', '_contents')] = load_string(
                config[key_name])
            del config[key_name]
    if 'ip_detect_contents' not in config:
        config['ip_detect_contents'] = test_util.helpers.ip_detect_script(
            platform)
    return yaml.dump(config)
Code Example #16
def main():
    options = check_environment()
    cf, ssh_info = provide_cluster(options)
    cluster = test_util.cluster.Cluster.from_cloudformation(
        cf, ssh_info, load_string(options.ssh_key_path))

    result = test_util.cluster.run_integration_tests(
        cluster,
        region=options.aws_region,
        aws_access_key_id=options.aws_access_key_id,
        aws_secret_access_key=options.aws_secret_access_key,
        test_cmd=options.test_cmd,
    )
    if result == 0:
        log.info('Test successful! Deleting CloudFormation.')
        cf.delete()
    else:
        logging.warning('Test exited with an error')
    if options.ci_flags:
        result = 0  # Wipe the return code so that tests can be muted in CI
    sys.exit(result)
Code Example #17
def build(package_store, name, variant, clean_after_build, recursive=False):
    assert isinstance(package_store, PackageStore)
    print("Building package {} variant {}".format(
        name, pkgpanda.util.variant_str(variant)))
    tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
    repository = Repository(tmpdir.name)

    package_dir = package_store.get_package_folder(name)

    def src_abs(name):
        return package_dir + '/' + name

    def cache_abs(filename):
        return package_store.get_package_cache_folder(name) + '/' + filename

    # Build pkginfo over time, translating fields from buildinfo.
    pkginfo = {}

    # Build up the docker command arguments over time, translating fields as needed.
    cmd = DockerCmd()

    assert (name, variant) in package_store.packages, \
        "Programming error: name, variant should have been validated to be valid before calling build()."
    buildinfo = copy.deepcopy(package_store.get_buildinfo(name, variant))

    if 'name' in buildinfo:
        raise BuildError(
            "'name' is not allowed in buildinfo.json, it is implicitly the name of the "
            "folder containing the buildinfo.json")

    # Convert single_source -> sources
    try:
        sources = expand_single_source_alias(name, buildinfo)
    except ValidationError as ex:
        raise BuildError(
            "Invalid buildinfo.json for package: {}".format(ex)) from ex

    # Save the final sources back into buildinfo so it gets written into
    # buildinfo.json. This also means buildinfo.json is always expanded form.
    buildinfo['sources'] = sources

    # Construct the source fetchers, gather the checkout ids from them
    checkout_ids = dict()
    fetchers = dict()
    try:
        for src_name, src_info in sorted(sources.items()):
            # TODO(cmaloney): Switch to a unified top level cache directory shared by all packages
            cache_dir = package_store.get_package_cache_folder(
                name) + '/' + src_name
            check_call(['mkdir', '-p', cache_dir])
            fetcher = get_src_fetcher(src_info, cache_dir, package_dir)
            fetchers[src_name] = fetcher
            checkout_ids[src_name] = fetcher.get_id()
    except ValidationError as ex:
        raise BuildError(
            "Validation error when fetching sources for package: {}".format(
                ex))

    for src_name, checkout_id in checkout_ids.items():
        # NOTE: single_source buildinfo was expanded above so the src_name is
        # always correct here.
        # Make sure we never accidentally overwrite something which might be
        # important. Fields should match if specified (And that should be
        # tested at some point). For now disallowing identical saves hassle.
        assert_no_duplicate_keys(checkout_id, buildinfo['sources'][src_name])
        buildinfo['sources'][src_name].update(checkout_id)

    # Add the sha1 of the buildinfo.json + build file to the build ids
    build_ids = {"sources": checkout_ids}
    build_ids['build'] = pkgpanda.util.sha1(src_abs(buildinfo['build_script']))
    build_ids['pkgpanda_version'] = pkgpanda.build.constants.version
    build_ids['variant'] = '' if variant is None else variant

    extra_dir = src_abs("extra")
    # Add the "extra" folder inside the package as an additional source if it
    # exists
    if os.path.exists(extra_dir):
        extra_id = hash_folder(extra_dir)
        build_ids['extra_source'] = extra_id
        buildinfo['extra_source'] = extra_id

    # Figure out the docker name.
    docker_name = buildinfo['docker']
    cmd.container = docker_name

    # Add the id of the docker build environment to the build_ids.
    try:
        docker_id = get_docker_id(docker_name)
    except CalledProcessError:
        # docker pull the container and try again
        check_call(['docker', 'pull', docker_name])
        docker_id = get_docker_id(docker_name)

    build_ids['docker'] = docker_id

    # TODO(cmaloney): The environment variables should be generated during build
    # not live in buildinfo.json.
    build_ids['environment'] = buildinfo['environment']

    # Packages need directories inside the fake install root (otherwise docker
    # will try making the directories on a readonly filesystem), so build the
    # install root now, and make the package directories in it as we go.
    install_dir = tempfile.mkdtemp(prefix="pkgpanda-")

    active_packages = list()
    active_package_ids = set()
    active_package_variants = dict()
    auto_deps = set()
    # Verify all requires are in the repository.
    if 'requires' in buildinfo:
        # Final package has the same requires as the build.
        pkginfo['requires'] = buildinfo['requires']

        # TODO(cmaloney): Pull generating the full set of requires out into a function.
        to_check = copy.deepcopy(buildinfo['requires'])
        if type(to_check) != list:
            raise BuildError(
                "`requires` in buildinfo.json must be an array of dependencies."
            )
        while to_check:
            requires_info = to_check.pop(0)
            requires_name, requires_variant = expand_require(requires_info)

            if requires_name in active_package_variants:
                # TODO(cmaloney): If one package depends on the <default>
                # variant of a package and 1+ others depends on a non-<default>
                # variant then update the dependency to the non-default variant
                # rather than erroring.
                if requires_variant != active_package_variants[requires_name]:
                    # TODO(cmaloney): Make this contain the chains of
                    # dependencies which contain the conflicting packages.
                    # a -> b -> c -> d {foo}
                    # e {bar} -> d {baz}
                    raise BuildError(
                        "Dependncy on multiple variants of the same package {}. "
                        "variants: {} {}".format(
                            requires_name, requires_variant,
                            active_package_variants[requires_name]))

            # The package {requires_name, variant} is already a dependency;
            # don't process it again / move on to the next.
                continue

            active_package_variants[requires_name] = requires_variant

            # Figure out the last build of the dependency, add that as the
            # fully expanded dependency.
            requires_last_build = package_store.get_last_build_filename(
                requires_name, requires_variant)
            if not os.path.exists(requires_last_build):
                if recursive:
                    # Build the dependency
                    build(package_store, requires_name, requires_variant,
                          clean_after_build, recursive)
                else:
                    raise BuildError(
                        "No last build file found for dependency {} variant {}. Rebuild "
                        "the dependency".format(requires_name,
                                                requires_variant))

            try:
                pkg_id_str = load_string(requires_last_build)
                auto_deps.add(pkg_id_str)
                pkg_buildinfo = package_store.get_buildinfo(
                    requires_name, requires_variant)
                pkg_requires = pkg_buildinfo['requires']
                pkg_path = repository.package_path(pkg_id_str)
                pkg_tar = pkg_id_str + '.tar.xz'
                if not os.path.exists(
                        package_store.get_package_cache_folder(requires_name) +
                        '/' + pkg_tar):
                    raise BuildError(
                        "The build tarball {} refered to by the last_build file of the "
                        "dependency {} variant {} doesn't exist. Rebuild the dependency."
                        .format(pkg_tar, requires_name, requires_variant))

                active_package_ids.add(pkg_id_str)

                # Mount the package into the docker container.
                cmd.volumes[pkg_path] = "/opt/mesosphere/packages/{}:ro".format(pkg_id_str)
                os.makedirs(
                    os.path.join(install_dir,
                                 "packages/{}".format(pkg_id_str)))

                # Add the dependencies of the package to the set which will be
                # activated.
                # TODO(cmaloney): All these 'transitive' dependencies shouldn't
                # be available to the package being built, only what depends on
                # them directly.
                to_check += pkg_requires
            except ValidationError as ex:
                raise BuildError(
                    "validating package needed as dependency {0}: {1}".format(
                        requires_name, ex)) from ex
            except PackageError as ex:
                raise BuildError(
                    "loading package needed as dependency {0}: {1}".format(
                        requires_name, ex)) from ex

    # Add requires to the package id, calculate the final package id.
    # NOTE: active_packages isn't fully constructed here since we lazily load
    # packages not already in the repository.
    build_ids['requires'] = list(active_package_ids)
    version_base = hash_checkout(build_ids)
    version = None
    if "version_extra" in buildinfo:
        version = "{0}-{1}".format(buildinfo["version_extra"], version_base)
    else:
        version = version_base
    pkg_id = PackageId.from_parts(name, version)

    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
    buildinfo['build_ids'] = build_ids
    buildinfo['package_version'] = version

    # Save the package name and variant. The variant is used when installing
    # packages to validate dependencies.
    buildinfo['name'] = name
    buildinfo['variant'] = variant

    # If the package is already built, don't do anything.
    pkg_path = package_store.get_package_cache_folder(
        name) + '/{}.tar.xz'.format(pkg_id)

    # Done if it exists locally
    if exists(pkg_path):
        print("Package up to date. Not re-building.")

        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant),
                     str(pkg_id))

        return pkg_path

    # Try downloading.
    dl_path = package_store.try_fetch_by_id(pkg_id)
    if dl_path:
        print(
            "Package up to date. Not re-building. Downloaded from repository-url."
        )
        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant),
                     str(pkg_id))
        print(dl_path, pkg_path)
        assert dl_path == pkg_path
        return pkg_path

    # Fall out and do the build since it couldn't be downloaded
    print("Unable to download from cache. Proceeding to build")

    print("Building package {} with buildinfo: {}".format(
        pkg_id, json.dumps(buildinfo, indent=2, sort_keys=True)))

    # Clean out src, result so later steps can use them freely for building.
    def clean():
        # Run a docker container to remove src/ and result/
        cmd = DockerCmd()
        cmd.volumes = {
            package_store.get_package_cache_folder(name): "/pkg/:rw",
        }
        cmd.container = "ubuntu:14.04.4"
        cmd.run(["rm", "-rf", "/pkg/src", "/pkg/result"])

    clean()

    # Only fresh builds are allowed which don't overlap existing artifacts.
    result_dir = cache_abs("result")
    if exists(result_dir):
        raise BuildError(
            "result folder must not exist. It will be made when the package is "
            "built. {}".format(result_dir))

    # 'mkpanda add' all implicit dependencies since we actually need to build.
    for dep in auto_deps:
        print("Auto-adding dependency: {}".format(dep))
        # NOTE: Not using the name pkg_id because that overrides the outer one.
        id_obj = PackageId(dep)
        add_package_file(repository, package_store.get_package_path(id_obj))
        package = repository.load(dep)
        active_packages.append(package)

    # Check out all the sources into their respective 'src/' folders.
    try:
        src_dir = cache_abs('src')
        if os.path.exists(src_dir):
            raise ValidationError(
                "'src' directory already exists, did you have a previous build? "
                "Currently all builds must be from scratch. Support should be "
                "added for re-using a src directory when possible. src={}".format(src_dir))
        os.mkdir(src_dir)
        for src_name, fetcher in sorted(fetchers.items()):
            root = cache_abs('src/' + src_name)
            os.mkdir(root)

            fetcher.checkout_to(root)
    except ValidationError as ex:
        raise BuildError(
            "Validation error when fetching sources for package: {}".format(
                ex))

    # Copy over environment settings
    pkginfo['environment'] = buildinfo['environment']

    # Whether pkgpanda should make sure a `/var/lib` state directory is available on the host
    pkginfo['state_directory'] = buildinfo.get('state_directory', False)
    if pkginfo['state_directory'] not in [True, False]:
        raise BuildError(
            "state_directory in buildinfo.json must be a boolean `true` or `false`"
        )

    username = buildinfo.get('username')
    if not (username is None or isinstance(username, str)):
        raise BuildError(
            "username in buildinfo.json must be either not set (no user for this"
            " package), or a user name string")
    if username:
        try:
            pkgpanda.UserManagement.validate_username(username)
        except ValidationError as ex:
            raise BuildError(
                "username in buildinfo.json didn't meet the validation rules. {}"
                .format(ex))
    pkginfo['username'] = username

    # Activate the packages so that we have a proper path, environment
    # variables.
    # TODO(cmaloney): RAII type thing for temporary directory so if we
    # don't get all the way through things will be cleaned up?
    install = Install(root=install_dir,
                      config_dir=None,
                      rooted_systemd=True,
                      manage_systemd=False,
                      block_systemd=True,
                      fake_path=True,
                      manage_users=False,
                      manage_state_dir=False)
    install.activate(active_packages)
    # Rewrite all the symlinks inside the active path because we will
    # be mounting the folder into a docker container, and the absolute
    # paths to the packages will change.
    # TODO(cmaloney): This isn't very clean, it would be much nicer to
    # just run pkgpanda inside the package.
    rewrite_symlinks(install_dir, repository.path, "/opt/mesosphere/packages/")

    print("Building package in docker")

    # TODO(cmaloney): Run as a specific non-root user, make it possible
    # for non-root to cleanup afterwards.
    # Run the build, prepping the environment as necessary.
    mkdir(cache_abs("result"))

    # Copy the build info to the resulting tarball
    write_json(cache_abs("src/buildinfo.full.json"), buildinfo)
    write_json(cache_abs("result/buildinfo.full.json"), buildinfo)

    write_json(cache_abs("result/pkginfo.json"), pkginfo)

    # Make the folder for the package we are building. If docker does it, it
    # gets auto-created with root permissions and we can't actually delete it.
    os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))

    # TODO(cmaloney): Disallow writing to well known files and directories?
    # Source we checked out
    cmd.volumes.update({
        # TODO(cmaloney): src should be read only...
        cache_abs("src"):
        "/pkg/src:rw",
        # The build script
        src_abs(buildinfo['build_script']):
        "/pkg/build:ro",
        # Getting the result out
        cache_abs("result"):
        "/opt/mesosphere/packages/{}:rw".format(pkg_id),
        install_dir:
        "/opt/mesosphere:ro"
    })

    if os.path.exists(extra_dir):
        cmd.volumes[extra_dir] = "/pkg/extra:ro"

    cmd.environment = {
        "PKG_VERSION": version,
        "PKG_NAME": name,
        "PKG_ID": pkg_id,
        "PKG_PATH": "/opt/mesosphere/packages/{}".format(pkg_id),
        "PKG_VARIANT": variant if variant is not None else "<default>"
    }

    try:
        # TODO(cmaloney): Run a wrapper which sources
        # /opt/mesosphere/environment then runs a build. Also should fix
        # ownership of /opt/mesosphere/packages/{pkg_id} post build.
        cmd.run([
            "/bin/bash", "-o", "nounset", "-o", "pipefail", "-o", "errexit",
            "/pkg/build"
        ])
    except CalledProcessError as ex:
        raise BuildError("docker exited non-zero: {}\nCommand: {}".format(
            ex.returncode, ' '.join(ex.cmd)))

    # Clean up the temporary install dir used for dependencies.
    # TODO(cmaloney): Move to an RAII wrapper.
    check_call(['rm', '-rf', install_dir])

    print("Building package tarball")

    # Check for forbidden services before packaging the tarball:
    try:
        check_forbidden_services(cache_abs("result"), RESERVED_UNIT_NAMES)
    except ValidationError as ex:
        raise BuildError("Package validation failed: {}".format(ex))

    # TODO(cmaloney): Updating / filling last_build should be moved out of
    # the build function.
    write_string(package_store.get_last_build_filename(name, variant),
                 str(pkg_id))

    # Bundle the artifacts into the pkgpanda package
    tmp_name = pkg_path + "-tmp.tar.xz"
    make_tar(tmp_name, cache_abs("result"))
    os.rename(tmp_name, pkg_path)
    print("Package built.")
    if clean_after_build:
        clean()
    return pkg_path
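
The package version above comes from hashing the build_ids dict with hash_checkout. One way to hash a nested, JSON-serializable dict deterministically can be sketched as follows (an assumption; pkgpanda's real hash_checkout may differ):

import hashlib
import json

def hash_checkout_sketch(build_ids):
    # Serialize with sorted keys so logically equal dicts hash identically.
    blob = json.dumps(build_ids, sort_keys=True, separators=(',', ':'))
    return hashlib.sha1(blob.encode('utf-8')).hexdigest()
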
Code Example #18
File: config.py  Project: unterstein/dcos
def calculate_ssh_private_key(ssh_private_key_filename):
    if ssh_private_key_filename == '':
        return 'NO KEY PROVIDED - CANNOT TEST'
    return load_string(ssh_private_key_filename)
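
A hypothetical call, assuming an SSH_KEY_PATH environment variable holds the key location:

key = calculate_ssh_private_key(os.getenv('SSH_KEY_PATH', ''))
# Returns the key text, or the 'NO KEY PROVIDED - CANNOT TEST' sentinel.
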
Code Example #19
def build_tree(package_store, mkbootstrap, tree_variant):
    """Build packages and bootstrap tarballs for one or all tree variants.

    Returns a dict mapping tree variants to bootstrap IDs.

    If tree_variant is None, builds all available tree variants.

    """
    # TODO(cmaloney): Add support for circular dependencies. They are doable
    # as long as there is a pre-built version of enough of the packages.

    # TODO(cmaloney): Make it so when we're building a treeinfo which has an
    # explicit package list we don't build all the other packages.
    build_order = list()
    visited = set()
    built = set()

    def visit(pkg_tuple: tuple):
        """Add a package and its requires to the build order.

        Raises AssertionError if pkg_tuple is in the set of visited packages.

        If the package has any requires, they're recursively visited and added
        to the build order depth-first. Then the package itself is added.

        """

        # Visit the node for the first (and only) time.
        assert pkg_tuple not in visited
        visited.add(pkg_tuple)

        # Ensure all dependencies are built. Sorted for stability
        for require in sorted(package_store.packages[pkg_tuple]['requires']):
            require_tuple = expand_require(require)

            # If the dependency has already been built, we can move on.
            if require_tuple in built:
                continue
            # If the dependency has not been built but has been visited, then
            # there's a cycle in the dependency graph.
            if require_tuple in visited:
                raise BuildError("Circular dependency. Circular link {0} -> {1}".format(pkg_tuple, require_tuple))

            if PackageId.is_id(require_tuple[0]):
                raise BuildError("Depending on a specific package id is not supported. Package {} "
                                 "depends on {}".format(pkg_tuple, require_tuple))

            if require_tuple not in package_store.packages:
                raise BuildError("Package {0} require {1} not buildable from tree.".format(pkg_tuple, require_tuple))

            # Add the dependency (after its dependencies, if any) to the build
            # order.
            visit(require_tuple)

        build_order.append(pkg_tuple)
        built.add(pkg_tuple)

    # Can't compare None with str while sorting, so sort on a tuple: the name,
    # whether the variant is None, and the variant itself ("" when it is None).
    def key_func(elem):
        return elem[0], elem[1] is None, elem[1] or ""

    def visit_packages(package_tuples):
        for pkg_tuple in sorted(package_tuples, key=key_func):
            if pkg_tuple in visited:
                continue
            visit(pkg_tuple)

    if tree_variant:
        package_sets = [package_store.get_package_set(tree_variant)]
    else:
        package_sets = package_store.get_all_package_sets()

    with logger.scope("resolve package graph"):
        # Build all required packages for all tree variants.
        for package_set in package_sets:
            visit_packages(package_set.all_packages)

    built_packages = dict()
    for (name, variant) in build_order:
        built_packages.setdefault(name, dict())

        # Run the build, store the built package path for later use.
        # TODO(cmaloney): Only build the requested variants, rather than all variants.
        built_packages[name][variant] = build(
            package_store,
            name,
            variant,
            True)

    # Build bootstrap tarballs for all tree variants.
    def make_bootstrap(package_set):
        with logger.scope("Making bootstrap variant: {}".format(pkgpanda.util.variant_name(package_set.variant))):
            package_paths = list()
            for name, pkg_variant in package_set.bootstrap_packages:
                package_paths.append(built_packages[name][pkg_variant])

            if mkbootstrap:
                return make_bootstrap_tarball(
                    package_store,
                    list(sorted(package_paths)),
                    package_set.variant)

    # Build bootstraps and package lists for all variants.
    # TODO(cmaloney): Allow distinguishing between "build all" and "build the default one".
    complete_cache_dir = package_store.get_complete_cache_dir()
    check_call(['mkdir', '-p', complete_cache_dir])
    results = {}
    for package_set in package_sets:
        info = {
            'bootstrap': make_bootstrap(package_set),
            'packages': sorted(
                load_string(package_store.get_last_build_filename(*pkg_tuple))
                for pkg_tuple in package_set.all_packages)}
        write_json(
            complete_cache_dir + '/' + pkgpanda.util.variant_prefix(package_set.variant) + 'complete.latest.json',
            info)
        results[package_set.variant] = info

    return results
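
The visit helper above is a depth-first topological sort with cycle detection. Stripped of the pkgpanda specifics, the same pattern looks like this (the names here are illustrative, not from the project):

def topo_sort(graph):
    """Return the nodes of graph (a dict of node -> iterable of
    dependencies) in dependency-first order, raising on cycles."""
    order, visited, done = [], set(), set()

    def visit(node):
        visited.add(node)
        for dep in sorted(graph[node]):
            if dep in done:
                continue
            if dep in visited:  # visited but not done: we are inside a cycle
                raise ValueError('circular dependency {} -> {}'.format(node, dep))
            visit(dep)
        order.append(node)
        done.add(node)

    for node in sorted(graph):
        if node not in visited:
            visit(node)
    return order
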
Code Example #20
File: __init__.py  Project: Jordan50/dcos
def build(variant, package_dir, name, repository_url, clean_after_build):
    print("Building package {} variant {}".format(name, variant or "<default>"))
    tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
    repository = Repository(tmpdir.name)

    def pkg_abs(name):
        return package_dir + '/' + name

    # Build pkginfo over time, translating fields from buildinfo.
    pkginfo = {}

    # Build up the docker command arguments over time, translating fields as needed.
    cmd = DockerCmd()

    buildinfo = load_buildinfo(package_dir, variant)

    if 'name' in buildinfo:
        raise BuildError("'name' is not allowed in buildinfo.json, it is implicitly the name of the "
                         "folder containing the buildinfo.json")

    # Make sure build_script is only set on variants
    if 'build_script' in buildinfo and variant is None:
        raise BuildError("build_script can only be set on package variants")

    # Convert single_source -> sources
    try:
        sources = expand_single_source_alias(name, buildinfo)
    except ValidationError as ex:
        raise BuildError("Invalid buildinfo.json for package: {}".format(ex)) from ex

    # Save the final sources back into buildinfo so it gets written into
    # buildinfo.json. This also means buildinfo.json is always expanded form.
    buildinfo['sources'] = sources

    # Construct the source fetchers, gather the checkout ids from them
    checkout_ids = dict()
    fetchers = dict()
    try:
        for src_name, src_info in sorted(sources.items()):
            if src_info['kind'] not in pkgpanda.build.src_fetchers.all_fetchers:
                raise ValidationError("No known way to catch src with kind '{}'. Known kinds: {}".format(
                    src_info['kind'],
                    pkgpanda.src_fetchers.all_fetchers.keys()))

            cache_dir = pkg_abs("cache")
            if not os.path.exists(cache_dir):
                os.mkdir(cache_dir)

            fetchers[src_name] = pkgpanda.build.src_fetchers.all_fetchers[src_info['kind']](src_name,
                                                                                            src_info,
                                                                                            package_dir)
            checkout_ids[src_name] = fetchers[src_name].get_id()
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    for src_name, checkout_id in checkout_ids.items():
        # NOTE: single_source buildinfo was expanded above so the src_name is
        # always correct here.
        # Make sure we never accidentally overwrite something which might be
        # important. Fields should match if specified (And that should be
        # tested at some point). For now disallowing identical saves hassle.
        assert_no_duplicate_keys(checkout_id, buildinfo['sources'][src_name])
        buildinfo['sources'][src_name].update(checkout_id)

    # Add the sha1sum of the buildinfo.json + build file to the build ids
    build_ids = {"sources": checkout_ids}
    build_ids['build'] = pkgpanda.util.sha1(pkg_abs("build"))
    build_ids['pkgpanda_version'] = pkgpanda.build.constants.version
    build_ids['variant'] = '' if variant is None else variant

    extra_dir = pkg_abs("extra")
    # Add the "extra" folder inside the package as an additional source if it
    # exists
    if os.path.exists(extra_dir):
        extra_id = hash_folder(extra_dir)
        build_ids['extra_source'] = extra_id
        buildinfo['extra_source'] = extra_id

    # Figure out the docker name.
    docker_name = buildinfo.get('docker', 'dcos-builder:latest')
    cmd.container = docker_name

    # Add the id of the docker build environment to the build_ids.
    try:
        docker_id = get_docker_id(docker_name)
    except CalledProcessError:
        # docker pull the container and try again
        check_call(['docker', 'pull', docker_name])
        docker_id = get_docker_id(docker_name)

    build_ids['docker'] = docker_id

    # TODO(cmaloney): The environment variables should be generated during build
    # not live in buildinfo.json.
    build_ids['environment'] = buildinfo.get('environment', {})

    # Packages need directories inside the fake install root (otherwise docker
    # will try making the directories on a readonly filesystem), so build the
    # install root now, and make the package directories in it as we go.
    install_dir = tempfile.mkdtemp(prefix="pkgpanda-")

    active_packages = list()
    active_package_ids = set()
    active_package_variants = dict()
    auto_deps = set()
    # Verify all requires are in the repository.
    if 'requires' in buildinfo:
        # Final package has the same requires as the build.
        pkginfo['requires'] = buildinfo['requires']

        # TODO(cmaloney): Pull generating the full set of requires out into a function.
        to_check = copy.deepcopy(buildinfo['requires'])
        if type(to_check) != list:
            raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
        while to_check:
            requires_info = to_check.pop(0)
            requires_name, requires_variant = expand_require(requires_info)

            if requires_name in active_package_variants:
                # TODO(cmaloney): If one package depends on the <default>
                # variant of a package and 1+ others depends on a non-<default>
                # variant then update the dependency to the non-default variant
                # rather than erroring.
                if requires_variant != active_package_variants[requires_name]:
                    # TODO(cmaloney): Make this contain the chains of
                    # dependencies which contain the conflicting packages.
                    # a -> b -> c -> d {foo}
                    # e {bar} -> d {baz}
                    raise BuildError("Dependncy on multiple variants of the same package {}. "
                                     "variants: {} {}".format(
                                        requires_name,
                                        requires_variant,
                                        active_package_variants[requires_name]))

                # The package {requires_name, variant} is already a dependency;
                # don't process it again / move on to the next.
                continue

            active_package_variants[requires_name] = requires_variant

            # Figure out the last build of the dependency, add that as the
            # fully expanded dependency.
            require_package_dir = os.path.normpath(pkg_abs('../' + requires_name))
            last_build = require_package_dir + '/' + last_build_filename(requires_variant)
            if not os.path.exists(last_build):
                raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
                                 "the dependency".format(requires_name, requires_variant))

            try:
                pkg_id_str = load_string(last_build)
                auto_deps.add(pkg_id_str)
                pkg_buildinfo = load_buildinfo(require_package_dir, requires_variant)
                pkg_requires = pkg_buildinfo.get('requires', list())
                pkg_path = repository.package_path(pkg_id_str)
                pkg_tar = pkg_id_str + '.tar.xz'
                if not os.path.exists(require_package_dir + '/' + pkg_tar):
                    raise BuildError("The build tarball {} refered to by the last_build file of the "
                                     "dependency {} variant {} doesn't exist. Rebuild the dependency.".format(
                                        pkg_tar,
                                        requires_name,
                                        requires_variant))

                active_package_ids.add(pkg_id_str)

                # Mount the package into the docker container.
                cmd.volumes[pkg_path] = "/opt/mesosphere/packages/{}:ro".format(pkg_id_str)
                os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))

                # Add the dependencies of the package to the set which will be
                # activated.
                # TODO(cmaloney): All these 'transitive' dependencies shouldn't
                # be available to the package being built, only what depends on
                # them directly.
                to_check += pkg_requires
            except ValidationError as ex:
                raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
            except PackageError as ex:
                raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex

    # Add requires to the package id, calculate the final package id.
    # NOTE: active_packages isn't fully constructed here since we lazily load
    # packages not already in the repository.
    build_ids['requires'] = list(active_package_ids)
    version_base = hash_checkout(build_ids)
    version = None
    if "version_extra" in buildinfo:
        version = "{0}-{1}".format(buildinfo["version_extra"], version_base)
    else:
        version = version_base
    pkg_id = PackageId.from_parts(name, version)

    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
    buildinfo['build_ids'] = build_ids
    buildinfo['package_version'] = version

    # Save the package name and variant. The variant is used when installing
    # packages to validate dependencies.
    buildinfo['name'] = name
    buildinfo['variant'] = variant

    # If the package is already built, don't do anything.
    pkg_path = pkg_abs("{}.tar.xz".format(pkg_id))

    # Done if it exists locally
    if exists(pkg_path):
        print("Package up to date. Not re-building.")

        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        check_call(["mkdir", "-p", pkg_abs("cache")])
        write_string(pkg_abs(last_build_filename(variant)), str(pkg_id))

        return pkg_path

    # Try downloading.
    if repository_url:
        tmp_filename = pkg_path + '.tmp'
        try:
            # Normalize to no trailing slash for repository_url
            repository_url = repository_url.rstrip('/')
            url = repository_url + '/packages/{0}/{1}.tar.xz'.format(pkg_id.name, str(pkg_id))
            print("Attempting to download", pkg_id, "from", url)
            download(tmp_filename, url, package_dir)
            os.rename(tmp_filename, pkg_path)

            print("Package up to date. Not re-building. Downloaded from repository-url.")
            # TODO(cmaloney): Updating / filling last_build should be moved out of
            # the build function.
            check_call(["mkdir", "-p", pkg_abs("cache")])
            write_string(pkg_abs(last_build_filename(variant)), str(pkg_id))
            return pkg_path
        except FetchError:
            try:
                os.remove(tmp_filename)
            except OSError:
                pass

            # Fall out and do the build since the command errored.
            print("Unable to download from cache. Proceeding to build")

    print("Building package {} with buildinfo: {}".format(
        pkg_id,
        json.dumps(buildinfo, indent=2, sort_keys=True)))

    # Clean out src, result so later steps can use them freely for building.
    clean(package_dir)

    # Only fresh builds are allowed which don't overlap existing artifacts.
    result_dir = pkg_abs("result")
    if exists(result_dir):
        raise BuildError("result folder must not exist. It will be made when the package is "
                         "built. {}".format(result_dir))

    # 'mkpanda add' all implicit dependencies since we actually need to build.
    for dep in auto_deps:
        print("Auto-adding dependency: {}".format(dep))
        # NOTE: Not using the name pkg_id because that overrides the outer one.
        id_obj = PackageId(dep)
        add_to_repository(repository, pkg_abs('../{0}/{1}.tar.xz'.format(id_obj.name, dep)))
        package = repository.load(dep)
        active_packages.append(package)

    # Check out all the sources into their respective 'src/' folders.
    try:
        src_dir = pkg_abs('src')
        if os.path.exists(src_dir):
            raise ValidationError(
                "'src' directory already exists, did you have a previous build? " +
                "Currently all builds must be from scratch. Support should be " +
                "added for re-using a src directory when possible. src={}".format(src_dir))
        os.mkdir(src_dir)
        for src_name, fetcher in sorted(fetchers.items()):
            root = pkg_abs('src/' + src_name)
            os.mkdir(root)

            fetcher.checkout_to(root)
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    # Copy over environment settings
    if 'environment' in buildinfo:
        pkginfo['environment'] = buildinfo['environment']

    # Activate the packages so that we have a proper path, environment
    # variables.
    # TODO(cmaloney): RAII type thing for temporary directory so if we
    # don't get all the way through things will be cleaned up?
    install = Install(install_dir, None, True, False, True, True)
    install.activate(active_packages)
    # Rewrite all the symlinks inside the active path because we will
    # be mounting the folder into a docker container, and the absolute
    # paths to the packages will change.
    # TODO(cmaloney): This isn't very clean, it would be much nicer to
    # just run pkgpanda inside the package.
    rewrite_symlinks(install_dir, repository.path, "/opt/mesosphere/packages/")

    print("Building package in docker")

    # TODO(cmaloney): Run as a specific non-root user, make it possible
    # for non-root to cleanup afterwards.
    # Run the build, prepping the environment as necessary.
    mkdir(pkg_abs("result"))

    # Copy the build info to the resulting tarball
    write_json(pkg_abs("src/buildinfo.full.json"), buildinfo)
    write_json(pkg_abs("result/buildinfo.full.json"), buildinfo)

    write_json(pkg_abs("result/pkginfo.json"), pkginfo)

    # Make the folder for the package we are building. If docker does it, it
    # gets auto-created with root permissions and we can't actually delete it.
    os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))

    # TODO(cmaloney): Disallow writing to well known files and directories?
    # Source we checked out
    cmd.volumes.update({
        # TODO(cmaloney): src should be read only...
        pkg_abs("src"): "/pkg/src:rw",
        # The build script
        pkg_abs(buildinfo.get('build_script', 'build')): "/pkg/build:ro",
        # Getting the result out
        pkg_abs("result"): "/opt/mesosphere/packages/{}:rw".format(pkg_id),
        install_dir: "/opt/mesosphere:ro"
    })

    if os.path.exists(extra_dir):
        cmd.volumes[extra_dir] = "/pkg/extra:ro"

    cmd.environment = {
        "PKG_VERSION": version,
        "PKG_NAME": name,
        "PKG_ID": pkg_id,
        "PKG_PATH": "/opt/mesosphere/packages/{}".format(pkg_id),
        "PKG_VARIANT": variant if variant is not None else "<default>"
    }

    try:
        # TODO(cmaloney): Run a wrapper which sources
        # /opt/mesosphere/environment then runs a build. Also should fix
        # ownership of /opt/mesosphere/packages/{pkg_id} post build.
        cmd.run([
            "/bin/bash",
            "-o", "nounset",
            "-o", "pipefail",
            "-o", "errexit",
            "/pkg/build"])
    except CalledProcessError as ex:
        raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))

    # Clean up the temporary install dir used for dependencies.
    # TODO(cmaloney): Move to an RAII wrapper.
    check_call(['rm', '-rf', install_dir])

    print("Building package tarball")

    # Check for forbidden services before packaging the tarball:
    try:
        check_forbidden_services(pkg_abs("result"), RESERVED_UNIT_NAMES)
    except ValidationError as ex:
        raise BuildError("Package validation failed: {}".format(ex))

    # TODO(cmaloney): Updating / filling last_build should be moved out of
    # the build function.
    check_call(["mkdir", "-p", pkg_abs("cache")])
    write_string(pkg_abs(last_build_filename(variant)), str(pkg_id))

    # Bundle the artifacts into the pkgpanda package
    tmp_name = pkg_path + "-tmp.tar.xz"
    make_tar(tmp_name, pkg_abs("result"))
    os.rename(tmp_name, pkg_path)
    print("Package built.")
    if clean_after_build:
        clean(package_dir)
    return pkg_path
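The symlink rewrite above (rewrite_symlinks) exists because the install tree is assembled on the host but mounted into the build container at /opt/mesosphere, so absolute links pointing at the host-side repository path would dangle inside the container. A minimal sketch of that idea, assuming a simple walk-and-relink is an acceptable stand-in for pkgpanda's real implementation:

import os

def rewrite_symlinks_sketch(root, old_prefix, new_prefix):
    # Walk the whole tree; directory symlinks show up in dirnames.
    for dirpath, dirnames, filenames in os.walk(root):
        for entry in dirnames + filenames:
            path = os.path.join(dirpath, entry)
            if not os.path.islink(path):
                continue
            target = os.readlink(path)
            if target.startswith(old_prefix):
                # Re-point the link at the path it will have in the container.
                os.unlink(path)
                os.symlink(new_prefix + target[len(old_prefix):], path)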
Code Example #21
def test_signal_service(dcos_api_session):
    """
    signal-service runs on an hourly timer; this test runs it as a one-off
    and pushes the results to the test_server app for easy retrieval
    """
    # This is due to caching done by 3DT / Signal service
    # We're going to remove this soon: https://mesosphere.atlassian.net/browse/DCOS-9050
    dcos_version = os.environ["DCOS_VERSION"]
    signal_config_data = load_json('/opt/mesosphere/etc/dcos-signal-config.json')
    customer_key = signal_config_data.get('customer_key', '')
    enabled = signal_config_data.get('enabled', 'false')
    cluster_id = load_string('/var/lib/dcos/cluster-id').strip()

    if enabled == 'false':
        pytest.skip('Telemetry disabled in /opt/mesosphere/etc/dcos-signal-config.json... skipping test')

    logging.info("Version: " + dcos_version)
    logging.info("Customer Key: " + customer_key)
    logging.info("Cluster ID: " + cluster_id)

    direct_report = dcos_api_session.get('/system/health/v1/report?cache=0')
    signal_results = subprocess.check_output(["/opt/mesosphere/bin/dcos-signal", "-test"], universal_newlines=True)
    r_data = json.loads(signal_results)

    exp_data = {
        'diagnostics': {
            'event': 'health',
            'anonymousId': cluster_id,
            'properties': {}
        },
        'cosmos': {
            'event': 'package_list',
            'anonymousId': cluster_id,
            'properties': {}
        },
        'mesos': {
            'event': 'mesos_track',
            'anonymousId': cluster_id,
            'properties': {}
        }
    }

    # Generic properties which are the same between all tracks
    generic_properties = {
        'platform': expanded_config['platform'],
        'provider': expanded_config['provider'],
        'source': 'cluster',
        'clusterId': cluster_id,
        'customerKey': customer_key,
        'environmentVersion': dcos_version,
        'variant': 'open'
    }

    # Insert the generic property data which is the same between all signal tracks
    exp_data['diagnostics']['properties'].update(generic_properties)
    exp_data['cosmos']['properties'].update(generic_properties)
    exp_data['mesos']['properties'].update(generic_properties)

    # Insert all the diagnostics data programmatically
    master_units = [
        'adminrouter-service',
        'adminrouter-reload-service',
        'adminrouter-reload-timer',
        'cosmos-service',
        'metrics-master-service',
        'metrics-master-socket',
        'exhibitor-service',
        'history-service',
        'log-master-service',
        'log-master-socket',
        'logrotate-master-service',
        'logrotate-master-timer',
        'marathon-service',
        'mesos-dns-service',
        'mesos-master-service',
        'metronome-service',
        'signal-service']
    all_node_units = [
        '3dt-service',
        '3dt-socket',
        'epmd-service',
        'gen-resolvconf-service',
        'gen-resolvconf-timer',
        'navstar-service',
        'pkgpanda-api-service',
        'pkgpanda-api-socket',
        'signal-timer',
        'spartan-service',
        'spartan-watchdog-service',
        'spartan-watchdog-timer']
    slave_units = [
        'mesos-slave-service']
    public_slave_units = [
        'mesos-slave-public-service']
    all_slave_units = [
        'docker-gc-service',
        'docker-gc-timer',
        'metrics-agent-service',
        'metrics-agent-socket',
        'adminrouter-agent-service',
        'adminrouter-agent-reload-service',
        'adminrouter-agent-reload-timer',
        'log-agent-service',
        'log-agent-socket',
        'logrotate-agent-service',
        'logrotate-agent-timer',
        'rexray-service']

    master_units.append('oauth-service')

    for unit in master_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(dcos_api_session.masters)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in all_node_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(
            dcos_api_session.all_slaves + dcos_api_session.masters)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(dcos_api_session.slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in public_slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] \
            = len(dcos_api_session.public_slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in all_slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] \
            = len(dcos_api_session.all_slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0

    def check_signal_data():
        # Check the entire hash of diagnostics data
        assert r_data['diagnostics'] == exp_data['diagnostics']
        # Check a subset of things regarding Mesos that we can logically check for
        framework_names = [x['name'] for x in r_data['mesos']['properties']['frameworks']]
        assert 'marathon' in framework_names
        assert 'metronome' in framework_names
        # There are no packages installed by default on the integration test, ensure the key exists
        assert len(r_data['cosmos']['properties']['package_list']) == 0

    try:
        check_signal_data()
    except AssertionError as err:
        logging.info('System report: {}'.format(direct_report.json()))
        raise err
Code Example #22
File: __init__.py  Project: alberts/dcos
def build(package_store, name, variant, clean_after_build, recursive=False):
    assert isinstance(package_store, PackageStore)
    print("Building package {} variant {}".format(name, pkgpanda.util.variant_str(variant)))
    tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
    repository = Repository(tmpdir.name)

    package_dir = package_store.get_package_folder(name)

    def src_abs(name):
        return package_dir + '/' + name

    def cache_abs(filename):
        return package_store.get_package_cache_folder(name) + '/' + filename

    # Build pkginfo over time, translating fields from buildinfo.
    pkginfo = {}

    # Build up the docker command arguments over time, translating fields as needed.
    cmd = DockerCmd()

    assert (name, variant) in package_store.packages, \
        "Programming error: name, variant should have been validated to be valid before calling build()."

    builder = IdBuilder(package_store.get_buildinfo(name, variant))
    final_buildinfo = dict()

    builder.add('name', name)
    builder.add('variant', pkgpanda.util.variant_str(variant))

    # Convert single_source -> sources
    if builder.has('sources'):
        if builder.has('single_source'):
            raise BuildError('Both sources and single_source cannot be specified at the same time')
        sources = builder.take('sources')
    elif builder.has('single_source'):
        sources = {name: builder.take('single_source')}
        builder.replace('single_source', 'sources', sources)
    else:
        builder.add('sources', {})
        sources = dict()
        print("NOTICE: No sources specified")

    final_buildinfo['sources'] = sources

    # Construct the source fetchers, gather the checkout ids from them
    checkout_ids = dict()
    fetchers = dict()
    try:
        for src_name, src_info in sorted(sources.items()):
            # TODO(cmaloney): Switch to a unified top level cache directory shared by all packages
            cache_dir = package_store.get_package_cache_folder(name) + '/' + src_name
            check_call(['mkdir', '-p', cache_dir])
            fetcher = get_src_fetcher(src_info, cache_dir, package_dir)
            fetchers[src_name] = fetcher
            checkout_ids[src_name] = fetcher.get_id()
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    for src_name, checkout_id in checkout_ids.items():
        # NOTE: single_source buildinfo was expanded above so the src_name is
        # always correct here.
        # Make sure we never accidentally overwrite something which might be
        # important. Fields should match if specified (and that should be
        # tested at some point). For now, disallowing duplicate keys saves hassle.
        assert_no_duplicate_keys(checkout_id, final_buildinfo['sources'][src_name])
        final_buildinfo['sources'][src_name].update(checkout_id)

    # Add the sha1 of the buildinfo.json + build file to the build ids
    builder.update('sources', checkout_ids)
    build_script = src_abs(builder.take('build_script'))
    # TODO(cmaloney): Change dest name to build_script_sha1
    builder.replace('build_script', 'build', pkgpanda.util.sha1(build_script))
    builder.add('pkgpanda_version', pkgpanda.build.constants.version)

    extra_dir = src_abs("extra")
    # Add the "extra" folder inside the package as an additional source if it
    # exists
    if os.path.exists(extra_dir):
        extra_id = hash_folder(extra_dir)
        builder.add('extra_source', extra_id)
        final_buildinfo['extra_source'] = extra_id

    # Figure out the docker name.
    docker_name = builder.take('docker')
    cmd.container = docker_name

    # Add the id of the docker build environment to the build_ids.
    try:
        docker_id = get_docker_id(docker_name)
    except CalledProcessError:
        # docker pull the container and try again
        check_call(['docker', 'pull', docker_name])
        docker_id = get_docker_id(docker_name)

    builder.update('docker', docker_id)

    # TODO(cmaloney): The environment variables should be generated during build
    # not live in buildinfo.json.
    pkginfo['environment'] = builder.take('environment')

    # Whether pkgpanda should make sure a `/var/lib` state directory is available on the host
    pkginfo['state_directory'] = builder.take('state_directory')
    if pkginfo['state_directory'] not in [True, False]:
        raise BuildError("state_directory in buildinfo.json must be a boolean `true` or `false`")

    username = None
    if builder.has('username'):
        username = builder.take('username')
        if not isinstance(username, str):
            raise BuildError("username in buildinfo.json must be either not set (no user for this"
                             " package), or a user name string")
        try:
            pkgpanda.UserManagement.validate_username(username)
        except ValidationError as ex:
            raise BuildError("username in buildinfo.json didn't meet the validation rules. {}".format(ex))
        pkginfo['username'] = username

    group = None
    if builder.has('group'):
        group = builder.take('group')
        if not isinstance(group, str):
            raise BuildError("group in buildinfo.json must be either not set (use default group for this user)"
                             ", or group must be a string")
        try:
            pkgpanda.UserManagement.validate_group_name(group)
        except ValidationError as ex:
            raise BuildError("group in buildinfo.json didn't meet the validation rules. {}".format(ex))
        pkginfo['group'] = group

    # Packages need directories inside the fake install root (otherwise docker
    # will try making the directories on a readonly filesystem), so build the
    # install root now, and make the package directories in it as we go.
    install_dir = tempfile.mkdtemp(prefix="pkgpanda-")

    active_packages = list()
    active_package_ids = set()
    active_package_variants = dict()
    auto_deps = set()

    # Final package has the same requires as the build.
    requires = builder.take('requires')
    pkginfo['requires'] = requires

    if builder.has("sysctl"):
        pkginfo["sysctl"] = builder.take("sysctl")

    # TODO(cmaloney): Pull generating the full set of requires out into a function.
    to_check = copy.deepcopy(requires)
    if type(to_check) != list:
        raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
    while to_check:
        requires_info = to_check.pop(0)
        requires_name, requires_variant = expand_require(requires_info)

        if requires_name in active_package_variants:
            # TODO(cmaloney): If one package depends on the <default>
            # variant of a package and 1+ others depends on a non-<default>
            # variant then update the dependency to the non-default variant
            # rather than erroring.
            if requires_variant != active_package_variants[requires_name]:
                # TODO(cmaloney): Make this contain the chains of
                # dependencies which contain the conflicting packages.
                # a -> b -> c -> d {foo}
                # e {bar} -> d {baz}
                raise BuildError(
                    "Dependncy on multiple variants of the same package {}. variants: {} {}".format(
                        requires_name,
                        requires_variant,
                        active_package_variants[requires_name]))

            # The package {requires_name, variant} is already a dependency;
            # don't process it again / move on to the next.
            continue

        active_package_variants[requires_name] = requires_variant

        # Figure out the last build of the dependency, add that as the
        # fully expanded dependency.
        requires_last_build = package_store.get_last_build_filename(requires_name, requires_variant)
        if not os.path.exists(requires_last_build):
            if recursive:
                # Build the dependency
                build(package_store, requires_name, requires_variant, clean_after_build, recursive)
            else:
                raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
                                 "the dependency".format(requires_name, requires_variant))

        try:
            pkg_id_str = load_string(requires_last_build)
            auto_deps.add(pkg_id_str)
            pkg_buildinfo = package_store.get_buildinfo(requires_name, requires_variant)
            pkg_requires = pkg_buildinfo['requires']
            pkg_path = repository.package_path(pkg_id_str)
            pkg_tar = pkg_id_str + '.tar.xz'
            if not os.path.exists(package_store.get_package_cache_folder(requires_name) + '/' + pkg_tar):
                raise BuildError(
                    "The build tarball {} refered to by the last_build file of the dependency {} "
                    "variant {} doesn't exist. Rebuild the dependency.".format(
                        pkg_tar,
                        requires_name,
                        requires_variant))

            active_package_ids.add(pkg_id_str)

            # Mount the package into the docker container.
            cmd.volumes[pkg_path] = "/opt/mesosphere/packages/{}:ro".format(pkg_id_str)
            os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))

            # Add the dependencies of the package to the set which will be
            # activated.
            # TODO(cmaloney): All these 'transitive' dependencies shouldn't
            # be available to the package being built, only what depends on
            # them directly.
            to_check += pkg_requires
        except ValidationError as ex:
            raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
        except PackageError as ex:
            raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex

    # Add requires to the package id, calculate the final package id.
    # NOTE: active_packages isn't fully constructed here since we lazily load
    # packages not already in the repository.
    builder.update('requires', list(active_package_ids))
    version_extra = None
    if builder.has('version_extra'):
        version_extra = builder.take('version_extra')

    build_ids = builder.get_build_ids()
    version_base = hash_checkout(build_ids)
    version = None
    if version_extra:
        version = "{0}-{1}".format(version_extra, version_base)
    else:
        version = version_base
    pkg_id = PackageId.from_parts(name, version)

    # Everything must have been extracted by now. If it wasn't, then we just
    # had a hard error that it was set but not used, and it wasn't included
    # in the calculation of the PackageId.
    builder = None

    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
    final_buildinfo['build_ids'] = build_ids
    final_buildinfo['package_version'] = version

    # Save the package name and variant. The variant is used when installing
    # packages to validate dependencies.
    final_buildinfo['name'] = name
    final_buildinfo['variant'] = variant

    # If the package is already built, don't do anything.
    pkg_path = package_store.get_package_cache_folder(name) + '/{}.tar.xz'.format(pkg_id)

    # Done if it exists locally
    if exists(pkg_path):
        print("Package up to date. Not re-building.")

        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))

        return pkg_path

    # Try downloading.
    dl_path = package_store.try_fetch_by_id(pkg_id)
    if dl_path:
        print("Package up to date. Not re-building. Downloaded from repository-url.")
        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
        print(dl_path, pkg_path)
        assert dl_path == pkg_path
        return pkg_path

    # Fall out and do the build since it couldn't be downloaded
    print("Unable to download from cache. Proceeding to build")

    print("Building package {} with buildinfo: {}".format(
        pkg_id,
        json.dumps(final_buildinfo, indent=2, sort_keys=True)))

    # Clean out src, result so later steps can use them freely for building.
    def clean():
        # Run a docker container to remove src/ and result/
        cmd = DockerCmd()
        cmd.volumes = {
            package_store.get_package_cache_folder(name): "/pkg/:rw",
        }
        cmd.container = "ubuntu:14.04.4"
        cmd.run("package-cleaner", ["rm", "-rf", "/pkg/src", "/pkg/result"])

    clean()

    # Only fresh builds are allowed which don't overlap existing artifacts.
    result_dir = cache_abs("result")
    if exists(result_dir):
        raise BuildError("result folder must not exist. It will be made when the package is "
                         "built. {}".format(result_dir))

    # 'mkpanda add' all implicit dependencies since we actually need to build.
    for dep in auto_deps:
        print("Auto-adding dependency: {}".format(dep))
        # NOTE: Not using the name pkg_id because that would shadow the outer one.
        id_obj = PackageId(dep)
        add_package_file(repository, package_store.get_package_path(id_obj))
        package = repository.load(dep)
        active_packages.append(package)

    # Check out all the sources into their respective 'src/' folders.
    try:
        src_dir = cache_abs('src')
        if os.path.exists(src_dir):
            raise ValidationError(
                "'src' directory already exists, did you have a previous build? " +
                "Currently all builds must be from scratch. Support should be " +
                "added for re-using a src directory when possible. src={}".format(src_dir))
        os.mkdir(src_dir)
        for src_name, fetcher in sorted(fetchers.items()):
            root = cache_abs('src/' + src_name)
            os.mkdir(root)

            fetcher.checkout_to(root)
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    # Activate the packages so that we have a proper path, environment
    # variables.
    # TODO(cmaloney): RAII type thing for temporary directory so if we
    # don't get all the way through things will be cleaned up?
    install = Install(
        root=install_dir,
        config_dir=None,
        rooted_systemd=True,
        manage_systemd=False,
        block_systemd=True,
        fake_path=True,
        manage_users=False,
        manage_state_dir=False)
    install.activate(active_packages)
    # Rewrite all the symlinks inside the active path because we will
    # be mounting the folder into a docker container, and the absolute
    # paths to the packages will change.
    # TODO(cmaloney): This isn't very clean, it would be much nicer to
    # just run pkgpanda inside the package.
    rewrite_symlinks(install_dir, repository.path, "/opt/mesosphere/packages/")

    print("Building package in docker")

    # TODO(cmaloney): Run as a specific non-root user, make it possible
    # for non-root to cleanup afterwards.
    # Run the build, prepping the environment as necessary.
    mkdir(cache_abs("result"))

    # Copy the build info to the resulting tarball
    write_json(cache_abs("src/buildinfo.full.json"), final_buildinfo)
    write_json(cache_abs("result/buildinfo.full.json"), final_buildinfo)

    write_json(cache_abs("result/pkginfo.json"), pkginfo)

    # Make the folder for the package we are building. If docker does it, it
    # gets auto-created with root permissions and we can't actually delete it.
    os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))

    # TODO(cmaloney): Disallow writing to well-known files and directories?
    # Source we checked out
    cmd.volumes.update({
        # TODO(cmaloney): src should be read only...
        cache_abs("src"): "/pkg/src:rw",
        # The build script
        build_script: "/pkg/build:ro",
        # Getting the result out
        cache_abs("result"): "/opt/mesosphere/packages/{}:rw".format(pkg_id),
        install_dir: "/opt/mesosphere:ro"
    })

    if os.path.exists(extra_dir):
        cmd.volumes[extra_dir] = "/pkg/extra:ro"

    cmd.environment = {
        "PKG_VERSION": version,
        "PKG_NAME": name,
        "PKG_ID": pkg_id,
        "PKG_PATH": "/opt/mesosphere/packages/{}".format(pkg_id),
        "PKG_VARIANT": variant if variant is not None else "<default>",
        "NUM_CORES": multiprocessing.cpu_count()
    }

    try:
        # TODO(cmaloney): Run a wrapper which sources
        # /opt/mesosphere/environment then runs a build. Also should fix
        # ownership of /opt/mesosphere/packages/{pkg_id} post build.
        cmd.run("package-builder", [
            "/bin/bash",
            "-o", "nounset",
            "-o", "pipefail",
            "-o", "errexit",
            "/pkg/build"])
    except CalledProcessError as ex:
        raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))

    # Clean up the temporary install dir used for dependencies.
    # TODO(cmaloney): Move to an RAII wrapper.
    check_call(['rm', '-rf', install_dir])

    print("Building package tarball")

    # Check for forbidden services before packaging the tarball:
    try:
        check_forbidden_services(cache_abs("result"), RESERVED_UNIT_NAMES)
    except ValidationError as ex:
        raise BuildError("Package validation failed: {}".format(ex))

    # TODO(cmaloney): Updating / filling last_build should be moved out of
    # the build function.
    write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))

    # Bundle the artifacts into the pkgpanda package
    tmp_name = pkg_path + "-tmp.tar.xz"
    make_tar(tmp_name, cache_abs("result"))
    os.rename(tmp_name, pkg_path)
    print("Package built.")
    if clean_after_build:
        clean()
    return pkg_path
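The version computed above is a content hash: every input the IdBuilder collected (sources, docker image id, build script sha1, requires, ...) feeds into hash_checkout, so any change to those inputs yields a new PackageId. A minimal sketch of one way to compute such a stable digest, assuming canonical JSON serialization is an acceptable stand-in for pkgpanda's hash_checkout:

import hashlib
import json

def hash_build_ids_sketch(build_ids):
    # Sorted keys and fixed separators make the digest independent of
    # dict insertion order, so equal inputs always hash equally.
    canonical = json.dumps(build_ids, sort_keys=True, separators=(',', ':'))
    return hashlib.sha1(canonical.encode('utf-8')).hexdigest()

# e.g. hash_build_ids_sketch({'docker': 'sha256:abc...', 'name': 'mesos'})
# returns a 40-character hex string usable as a version_base.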
Code Example #23
File: exhibitor.py  Project: alberts/dcos
def get_zk_pid():
    return load_string(zk_pid_path)
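A hedged usage sketch for the helper above: the loaded string is a process id, so a caller would typically convert it and probe the process with signal 0 (zk_pid_path stands in for whatever module-level path the surrounding file defines):

import os

def zk_is_running(zk_pid_path):
    # Equivalent of load_string: read the pid file and strip whitespace.
    with open(zk_pid_path) as f:
        pid = int(f.read().strip())
    try:
        os.kill(pid, 0)  # signal 0 checks existence; nothing is delivered
    except ProcessLookupError:
        return False
    except PermissionError:
        return True  # the process exists but belongs to another user
    return True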
Code Example #24
File: test_upgrade_vpc.py  Project: cmaloney/dcos
def main():
    num_masters = int(os.getenv('MASTERS', '3'))
    num_agents = int(os.getenv('AGENTS', '2'))
    num_public_agents = int(os.getenv('PUBLIC_AGENTS', '1'))
    stack_name = 'upgrade-test-' + ''.join(
        random.choice(string.ascii_uppercase + string.digits)
        for _ in range(10))

    test_cmd = os.getenv('DCOS_PYTEST_CMD',
                         'py.test -vv -s -rs ' + os.getenv('CI_FLAGS', ''))

    stable_installer_url = os.environ['STABLE_INSTALLER_URL']
    installer_url = os.environ['INSTALLER_URL']

    vpc, ssh_info = test_util.aws.VpcCfStack.create(
        stack_name=stack_name,
        instance_type='m4.xlarge',
        instance_os='cent-os-7-dcos-prereqs',
        # An instance for each cluster node plus the bootstrap.
        instance_count=(num_masters + num_agents + num_public_agents + 1),
        admin_location='0.0.0.0/0',
        key_pair_name='default',
        boto_wrapper=test_util.aws.BotoWrapper(
            region=os.getenv('DEFAULT_AWS_REGION', 'eu-central-1'),
            aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
            aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),
        ),
    )
    vpc.wait_for_stack_creation()
    cluster = test_util.cluster.Cluster.from_vpc(
        vpc,
        ssh_info,
        ssh_key=load_string(os.getenv('DCOS_SSH_KEY_PATH', 'default_ssh_key')),
        num_masters=num_masters,
        num_agents=num_agents,
        num_public_agents=num_public_agents,
    )

    # Use the CLI installer to set exhibitor_storage_backend = zookeeper.
    test_util.cluster.install_dcos(cluster, stable_installer_url, api=False)

    master_list = [h.private_ip for h in cluster.masters]

    cluster_api = ClusterApi(
        'http://{ip}'.format(ip=cluster.masters[0].public_ip),
        master_list,
        master_list,
        [h.private_ip for h in cluster.agents],
        [h.private_ip for h in cluster.public_agents],
        "root",  # default_os_user
        web_auth_default_user=DcosUser(CI_AUTH_JSON),
        ca_cert_path=None)

    cluster_api.wait_for_dcos()

    # Deploy an app
    cluster_api.marathon.deploy_app(get_test_app())

    task_info_before_upgrade = get_task_info(
        cluster_api.marathon.get('v2/apps').json(),
        cluster_api.marathon.get('v2/tasks').json())

    assert task_info_before_upgrade is not None, "Unable to get task details of the cluster."
    assert task_info_before_upgrade.state == "TASK_RUNNING", "Task is not in the running state."

    with cluster.ssher.tunnel(cluster.bootstrap_host) as bootstrap_host_tunnel:
        bootstrap_host_tunnel.remote_cmd(
            ['sudo', 'rm', '-rf', cluster.ssher.home_dir + '/*'])

    test_util.cluster.upgrade_dcos(cluster, installer_url)

    task_info_after_upgrade = get_task_info(
        cluster_api.marathon.get('v2/apps').json(),
        cluster_api.marathon.get('v2/tasks').json())

    assert task_info_after_upgrade is not None, "Unable to get the task details of the cluster."
    assert task_info_after_upgrade.state == "TASK_RUNNING", "Task is not in the running state."

    assert task_info_before_upgrade.id == task_info_after_upgrade.id, \
        "Task ID before and after the upgrade did not match."

    # At least one health check has happened in the new cluster since the last health check in the old cluster.
    assert (task_info_after_upgrade.last_success_time >
            task_info_before_upgrade.last_success_time + task_info_before_upgrade.health_check_interval), \
        "Invalid health-check for the task in the upgraded cluster."

    result = test_util.cluster.run_integration_tests(cluster,
                                                     test_cmd=test_cmd)

    if result == 0:
        log.info("Test successsful! Deleting VPC if provided in this run...")
        vpc.delete()
    else:
        log.info(
            "Test failed! VPC cluster will remain available for debugging for 2 hour after instantiation."
        )
    sys.exit(result)
Code Example #25
File: config.py  Project: warrenween/dcos
def calculate_ssh_private_key(ssh_private_key_filename):
    if ssh_private_key_filename == '':
        return launch.util.NO_TEST_FLAG
    return load_string(ssh_private_key_filename)
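A small illustrative check of the two branches above. This assumes launch.util.NO_TEST_FLAG is a sentinel constant and that load_string strips surrounding whitespace; the temporary file merely stands in for a real key:

import tempfile

# Empty filename -> the sentinel flag; no file is read.
assert calculate_ssh_private_key('') == launch.util.NO_TEST_FLAG

# Non-empty filename -> the file contents are returned.
with tempfile.NamedTemporaryFile('w', suffix='.pem') as key_file:
    key_file.write('-----BEGIN RSA PRIVATE KEY-----\n...')
    key_file.flush()
    assert calculate_ssh_private_key(key_file.name).startswith('-----BEGIN')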
Code Example #26
def main():
    options = check_environment()

    cluster = None
    vpc = None
    ssh_key = load_string(options.ssh_key_path)
    if options.host_list is None:
        log.info('VPC_HOSTS not provided, requesting new VPC ...')
        random_identifier = ''.join(
            random.choice(string.ascii_uppercase + string.digits)
            for _ in range(10))
        unique_cluster_id = "installer-test-{}".format(random_identifier)
        log.info("Spinning up AWS VPC with ID: {}".format(unique_cluster_id))
        if options.test_install_prereqs:
            os_name = "cent-os-7"
        else:
            os_name = "cent-os-7-dcos-prereqs"
        # TODO(mellenburg): Switch to using generated keys
        bw = test_util.aws.BotoWrapper(
            region=DEFAULT_AWS_REGION,
            aws_access_key_id=options.aws_access_key_id,
            aws_secret_access_key=options.aws_secret_access_key)
        vpc, ssh_info = test_util.aws.VpcCfStack.create(
            stack_name=unique_cluster_id,
            instance_type=options.instance_type,
            instance_os=os_name,
            # An instance for each cluster node plus the bootstrap.
            instance_count=(options.masters + options.agents +
                            options.public_agents + 1),
            admin_location='0.0.0.0/0',
            key_pair_name='default',
            boto_wrapper=bw)
        vpc.wait_for_stack_creation()

        cluster = test_util.cluster.Cluster.from_vpc(
            vpc,
            ssh_info,
            ssh_key=ssh_key,
            num_masters=options.masters,
            num_agents=options.agents,
            num_public_agents=options.public_agents,
        )
    else:
        # Assume an existing onprem CentOS cluster.
        cluster = test_util.cluster.Cluster.from_hosts(
            ssh_info=test_util.aws.SSH_INFO['centos'],
            ssh_key=ssh_key,
            hosts=options.host_list,
            num_masters=options.masters,
            num_agents=options.agents,
            num_public_agents=options.public_agents,
        )

    test_util.cluster.install_dcos(
        cluster,
        installer_url=options.installer_url,
        setup=options.do_setup,
        api=options.use_api,
        add_config_path=options.add_config_path,
        # If we don't want to test the prereq install, use offline mode to avoid it.
        installer_api_offline_mode=(not options.test_install_prereqs),
        install_prereqs=options.test_install_prereqs,
        install_prereqs_only=options.test_install_prereqs_only,
    )

    if options.test_install_prereqs and options.test_install_prereqs_only:
        # install_dcos() exited after running prereqs, so we're done.
        if vpc:
            vpc.delete()
        sys.exit(0)

    result = test_util.cluster.run_integration_tests(
        cluster,
        # Setting dns_search: mesos not currently supported in API
        region=DEFAULT_AWS_REGION,
        aws_access_key_id=options.aws_access_key_id,
        aws_secret_access_key=options.aws_secret_access_key,
        test_cmd=options.test_cmd,
    )

    if result == 0:
        log.info("Test successful! Deleting VPC if provided in this run.")
        if vpc:
            vpc.delete()
    else:
        log.info(
            "Test failed! VPC will remain for debugging 1 hour from instantiation"
        )
    if options.ci_flags:
        result = 0  # Wipe the return code so that tests can be muted in CI
    sys.exit(result)
Code Example #27
File: launch.py  Project: zhaixuepan/dcos
    def ssh_from_config(self, config):
        """
        In order to deploy AWS instances, one must use an SSH key pair that was
        previously created in and downloaded from AWS. The private key cannot be
        obtained after its creation; this helper exists to remove this hassle.

        This method will take the config dict, scan the parameters for KeyName,
        and, should KeyName not be found, generate a key and amend the config.
        Returns two dicts: ssh_info and temporary resources. SSH user cannot be
        inferred, so the user must still provide this explicitly via the field
        'private_key_path'

        Thus, there are 4 possible allowable scenarios:
        ### Result: nothing generated, testing possible ###
        ---
        parameters:
          - ParameterKey: KeyName
            ParameterValue: my_key_name
        ssh_info:
          user: foo
          private_key_path: path_to_my_key

        ### Result: key generated, testing possible ###
        ---
        ssh_info:
          user: foo

        ### Result: nothing generated, testing not possible ###
        ---
        parameters:
          - ParameterKey: KeyName
            ParameterValue: my_key_name

        ### Result: key generated, testing not possible ###
        ---
        """
        temp_resources = {}
        key_name = None
        private_key = None
        # Native AWS parameters take precedence
        if 'parameters' in config:
            for p in config['parameters']:
                if p['ParameterKey'] == 'KeyName':
                    key_name = p['ParameterValue']
        # check ssh_info data
        if 'ssh_info' in config and 'private_key_path' in config['ssh_info']:
            if key_name is None:
                raise LauncherError(
                    'ValidationError', 'If a private_key_path is provided, '
                    'then the KeyName template parameter must be set')
            private_key = load_string(config['ssh_info']['private_key_path'])
        if not key_name:
            key_name = config['stack_name']
            private_key = self.boto_wrapper.create_key_pair(key_name)
            temp_resources['key_name'] = key_name
            cf_params = config.get('parameters', list())
            cf_params.append({
                'ParameterKey': 'KeyName',
                'ParameterValue': key_name
            })
            config['parameters'] = cf_params
        user = config.get('ssh_info', dict()).get('user', None)
        ssh_info = {'private_key': private_key, 'user': user}
        if user is None:
            print(
                'Testing not possible; user must be provided under ssh_info in config YAML'
            )
        if private_key is None:
            print(
                'Testing not possible; private_key_path must be provided under ssh_info in config YAML'
            )
        return ssh_info, temp_resources
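A hedged sketch of exercising the key-generation branch above with a stub boto wrapper, to show how the config is amended in place. StubBotoWrapper and launcher are illustrative stand-ins, not part of the real API:

class StubBotoWrapper:
    def create_key_pair(self, key_name):
        # The real BotoWrapper would call EC2 and return the private key body.
        return '-----BEGIN RSA PRIVATE KEY-----\n(stub)\n-----END RSA PRIVATE KEY-----'

launcher.boto_wrapper = StubBotoWrapper()  # launcher: an instance of the class above
config = {'stack_name': 'demo-stack', 'ssh_info': {'user': 'centos'}}
ssh_info, temp_resources = launcher.ssh_from_config(config)

# No KeyName was given, so one was generated and recorded for cleanup...
assert temp_resources == {'key_name': 'demo-stack'}
# ...and the template parameters were amended so CloudFormation uses it.
assert {'ParameterKey': 'KeyName', 'ParameterValue': 'demo-stack'} in config['parameters']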
Code Example #28
File: test_composition.py  Project: tamarrow/dcos
def test_signal_service(dcos_api_session):
    """
    signal-service runs on an hourly timer; this test runs it as a one-off
    and pushes the results to the test_server app for easy retrieval
    """
    # This is due to caching done by 3DT / Signal service
    # We're going to remove this soon: https://mesosphere.atlassian.net/browse/DCOS-9050
    dcos_version = os.environ["DCOS_VERSION"]
    signal_config_data = load_json('/opt/mesosphere/etc/dcos-signal-config.json')
    customer_key = signal_config_data.get('customer_key', '')
    enabled = signal_config_data.get('enabled', 'false')
    cluster_id = load_string('/var/lib/dcos/cluster-id').strip()

    if enabled == 'false':
        pytest.skip('Telemetry disabled in /opt/mesosphere/etc/dcos-signal-config.json... skipping test')

    logging.info("Version: " + dcos_version)
    logging.info("Customer Key: " + customer_key)
    logging.info("Cluster ID: " + cluster_id)

    direct_report = dcos_api_session.get('/system/health/v1/report?cache=0')
    signal_results = subprocess.check_output(["/opt/mesosphere/bin/dcos-signal", "-test"], universal_newlines=True)
    r_data = json.loads(signal_results)

    exp_data = {
        'diagnostics': {
            'event': 'health',
            'anonymousId': cluster_id,
            'properties': {}
        },
        'cosmos': {
            'event': 'package_list',
            'anonymousId': cluster_id,
            'properties': {}
        },
        'mesos': {
            'event': 'mesos_track',
            'anonymousId': cluster_id,
            'properties': {}
        }
    }

    # Generic properties which are the same between all tracks
    generic_properties = {
        'platform': expanded_config['platform'],
        'provider': expanded_config['provider'],
        'source': 'cluster',
        'clusterId': cluster_id,
        'customerKey': customer_key,
        'environmentVersion': dcos_version,
        'variant': 'open'
    }

    # Insert the generic property data which is the same between all signal tracks
    exp_data['diagnostics']['properties'].update(generic_properties)
    exp_data['cosmos']['properties'].update(generic_properties)
    exp_data['mesos']['properties'].update(generic_properties)

    # Insert all the diagnostics data programmatically
    master_units = [
        'adminrouter-service',
        'adminrouter-reload-service',
        'adminrouter-reload-timer',
        'cosmos-service',
        'metrics-master-service',
        'metrics-master-socket',
        'exhibitor-service',
        'history-service',
        'log-master-service',
        'log-master-socket',
        'logrotate-master-service',
        'logrotate-master-timer',
        'marathon-service',
        'mesos-dns-service',
        'mesos-master-service',
        'metronome-service',
        'signal-service']
    all_node_units = [
        '3dt-service',
        '3dt-socket',
        'epmd-service',
        'gen-resolvconf-service',
        'gen-resolvconf-timer',
        'navstar-service',
        'pkgpanda-api-service',
        'signal-timer',
        'spartan-service',
        'spartan-watchdog-service',
        'spartan-watchdog-timer']
    slave_units = [
        'mesos-slave-service']
    public_slave_units = [
        'mesos-slave-public-service']
    all_slave_units = [
        'docker-gc-service',
        'docker-gc-timer',
        'metrics-agent-service',
        'metrics-agent-socket',
        'adminrouter-agent-service',
        'adminrouter-agent-reload-service',
        'adminrouter-agent-reload-timer',
        'log-agent-service',
        'log-agent-socket',
        'logrotate-agent-service',
        'logrotate-agent-timer',
        'rexray-service']

    master_units.append('oauth-service')

    for unit in master_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(dcos_api_session.masters)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in all_node_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(
            dcos_api_session.all_slaves + dcos_api_session.masters)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] = len(dcos_api_session.slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in public_slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] \
            = len(dcos_api_session.public_slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0
    for unit in all_slave_units:
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-total".format(unit)] \
            = len(dcos_api_session.all_slaves)
        exp_data['diagnostics']['properties']["health-unit-dcos-{}-unhealthy".format(unit)] = 0

    def check_signal_data():
        # Check the entire hash of diagnostics data
        assert r_data['diagnostics'] == exp_data['diagnostics']
        # Check a subset of things regarding Mesos that we can logically check for
        framework_names = [x['name'] for x in r_data['mesos']['properties']['frameworks']]
        assert 'marathon' in framework_names
        assert 'metronome' in framework_names
        # No packages are installed by default on the integration test cluster; ensure the key exists and the list is empty
        assert len(r_data['cosmos']['properties']['package_list']) == 0

    try:
        check_signal_data()
    except AssertionError as err:
        logging.info('System report: {}'.format(direct_report.json()))
        raise err
Code Example #29
    def ssh_from_config(self, config):
        """
        In AWS simple deploys, SSH is only used for running the integration test suite.
        In order to use SSH with AWS, one must use an SSH key pair that was previously created
        in and downloaded from AWS. The private key cannot be obtained after its creation, so there
        are three supported possibilities:
        1. User has no preset key pair and would like one to be created for just this instance.
            The keypair will be generated and the private key and key name will be added to the
            ssh_info of the cluster info JSON. This key will be marked in the ssh_info for deletion
            by dcos-launch when the entire cluster is deleted. SSH user name cannot be inferred so
            it must still be provided in the ssh_info of the config
        2. User has a preset AWS KeyPair and no corresponding private key. Testing will not be possible
            without the private key, so ssh_info can be completely omitted with no loss.
        3. User has a preset AWS KeyPair and has the corresponding private key. The private key must be
            pointed to with private_key_path in the config ssh_info along with user.

        Cases 2 and 3 require specifying a key name. The key name can be provided to dcos-launch in two ways:
        1. Passed directly to the template like other template parameters.
        ---
        parameters:
          - ParameterKey: KeyName
            ParameterValue: my_key_name
        ssh_info:
          user: foo
          private_key_path: path_to_my_key
        2. Provided with the other SSH parameters:
        ---
        ssh_info:
          user: foo
          key_name: my_key_name
          private_key_path: path_to_my_key

        This method will take the config dict, determine if a key name is provided, generate
        the key and amend the config if necessary, and return the ssh_info for the cluster info JSON
        """
        generate_key = False
        key_name = None
        private_key = None
        # Native AWS parameters take precedence
        if 'parameters' in config:
            for p in config['parameters']:
                if p['ParameterKey'] == 'KeyName':
                    key_name = p['ParameterValue']
        # check ssh_info data
        if 'ssh_info' in config:
            if key_name is None:
                key_name = config['ssh_info'].get('key_name', None)
            elif 'key_name' in config['ssh_info']:
                raise LauncherError(
                    'ValidationError',
                    'Provide key name as either a parameter or in ssh_info; not both'
                )
            if 'private_key_path' in config['ssh_info']:
                if key_name is None:
                    raise LauncherError(
                        'ValidationError',
                        'If a private_key_path is provided, '
                        'then the KeyName template parameter must be set')
                private_key = load_string(
                    config['ssh_info']['private_key_path'])
        if not key_name:
            key_name = config['stack_name']
            generate_key = True
            private_key = self.boto_wrapper.create_key_pair(key_name)
            cf_params = config.get('parameters', list())
            cf_params.append({
                'ParameterKey': 'KeyName',
                'ParameterValue': key_name
            })
            config['parameters'] = cf_params
        return {
            'delete_with_stack': generate_key,
            'private_key': private_key,
            'key_name': key_name,
            'user': config.get('ssh_info', dict()).get('user', None)
        }
Code Example #30
def main():
    num_masters = int(os.getenv('MASTERS', '3'))
    num_agents = int(os.getenv('AGENTS', '2'))
    num_public_agents = int(os.getenv('PUBLIC_AGENTS', '1'))
    stack_name = 'upgrade-test-' + ''.join(
        random.choice(string.ascii_uppercase + string.digits)
        for _ in range(10))

    test_cmd = os.getenv('DCOS_PYTEST_CMD',
                         'py.test -vv -rs ' + os.getenv('CI_FLAGS', ''))

    stable_installer_url = os.environ['STABLE_INSTALLER_URL']
    installer_url = os.environ['INSTALLER_URL']

    config_yaml_override_install = os.getenv('CONFIG_YAML_OVERRIDE_INSTALL')
    config_yaml_override_upgrade = os.getenv('CONFIG_YAML_OVERRIDE_UPGRADE')

    vpc, ssh_info = test_util.aws.VpcCfStack.create(
        stack_name=stack_name,
        instance_type='m4.xlarge',
        instance_os='cent-os-7-dcos-prereqs',
        # An instance for each cluster node plus the bootstrap.
        instance_count=(num_masters + num_agents + num_public_agents + 1),
        admin_location='0.0.0.0/0',
        key_pair_name='default',
        boto_wrapper=test_util.aws.BotoWrapper(
            region=os.getenv('DEFAULT_AWS_REGION', 'eu-central-1'),
            aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
            aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),
        ),
    )
    vpc.wait_for_stack_creation()
    cluster = test_util.cluster.Cluster.from_vpc(
        vpc,
        ssh_info,
        ssh_key=load_string(os.getenv('DCOS_SSH_KEY_PATH', 'default_ssh_key')),
        num_masters=num_masters,
        num_agents=num_agents,
        num_public_agents=num_public_agents,
    )

    # Use the CLI installer to set exhibitor_storage_backend = zookeeper.
    test_util.cluster.install_dcos(
        cluster,
        stable_installer_url,
        api=False,
        add_config_path=config_yaml_override_install)

    master_list = [h.private_ip for h in cluster.masters]

    cluster_api = DcosApiSession(
        'http://{ip}'.format(ip=cluster.masters[0].public_ip),
        master_list,
        master_list,
        [h.private_ip for h in cluster.agents],
        [h.private_ip for h in cluster.public_agents],
        "root",  # default_os_user
        auth_user=DcosUser(CI_CREDENTIALS))

    cluster_api.wait_for_dcos()

    with cluster.ssher.tunnel(cluster.bootstrap_host) as bootstrap_host_tunnel:
        bootstrap_host_tunnel.remote_cmd(
            ['sudo', 'rm', '-rf', cluster.ssher.home_dir + '/*'])

    with cluster_workload(cluster_api):
        test_util.cluster.upgrade_dcos(
            cluster,
            installer_url,
            add_config_path=config_yaml_override_upgrade)

    result = test_util.cluster.run_integration_tests(cluster,
                                                     test_cmd=test_cmd)

    if result == 0:
        log.info("Test successful! Deleting VPC if provided in this run.")
        vpc.delete()
    else:
        log.info(
            "Test failed! VPC cluster will remain available for debugging for 2 hour after instantiation."
        )
    sys.exit(result)
Code Example #31
File: launch.py  Project: rlugojr/dcos
    def ssh_from_config(self, config):
        """
        In order to deploy AWS instances, one must use an SSH key pair that was
        previously created in and downloaded from AWS. The private key cannot be
        obtained after its creation; this helper exists to remove this hassle.

        This method will take the config dict, scan the parameters for KeyName,
        and, should KeyName not be found, generate a key and amend the config.
        Returns two dicts: ssh_info and temporary resources. SSH user cannot be
        inferred, so the user must still provide this explicitly via the field
        'private_key_path'

        Thus, there are 4 possible allowable scenarios:
        ### Result: nothing generated, testing possible ###
        ---
        parameters:
          - ParameterKey: KeyName
            ParameterValue: my_key_name
        ssh_info:
          user: foo
          private_key_path: path_to_my_key

        ### Result: key generated, testing possible ###
        ---
        ssh_info:
          user: foo

        ### Result: nothing generated, testing not possible ###
        ---
        parameters:
          - ParameterKey: KeyName
            ParameterValue: my_key_name

        ### Result: key generated, testing not possible ###
        ---
        """
        temp_resources = {}
        key_name = None
        private_key = None
        # Native AWS parameters take precedence
        if 'parameters' in config:
            for p in config['parameters']:
                if p['ParameterKey'] == 'KeyName':
                    key_name = p['ParameterValue']
        # check ssh_info data
        if 'ssh_info' in config and 'private_key_path' in config['ssh_info']:
            if key_name is None:
                raise LauncherError('ValidationError', 'If a private_key_path is provided, '
                                    'then the KeyName template parameter must be set')
            private_key = load_string(config['ssh_info']['private_key_path'])
        if not key_name:
            key_name = config['stack_name']
            private_key = self.boto_wrapper.create_key_pair(key_name)
            temp_resources['key_name'] = key_name
            cf_params = config.get('parameters', list())
            cf_params.append({'ParameterKey': 'KeyName', 'ParameterValue': key_name})
            config['parameters'] = cf_params
        user = config.get('ssh_info', dict()).get('user', None)
        ssh_info = {'private_key': private_key, 'user': user}
        if user is None:
            print('Testing not possible; user must be provided under ssh_info in config YAML')
        if private_key is None:
            print('Testing not possible; private_key_path must be provided under ssh_info in config YAML')
        return ssh_info, temp_resources
Code Example #32
File: __init__.py  Project: alberts/dcos
def build_tree(package_store, mkbootstrap, tree_variant):
    """Build packages and bootstrap tarballs for one or all tree variants.

    Returns a dict mapping tree variants to bootstrap IDs.

    If tree_variant is None, builds all available tree variants.

    """
    # TODO(cmaloney): Add support for circular dependencies. They are doable
    # long as there is a pre-built version of enough of the packages.

    # TODO(cmaloney): Make it so when we're building a treeinfo which has an
    # explicit package list we don't build all the other packages.
    build_order = list()
    visited = set()
    built = set()

    def visit(pkg_tuple):
        """Add a package and its requires to the build order.

        Raises AssertionError if pkg_tuple is in the set of visited packages.

        If the package has any requires, they're recursively visited and added
        to the build order depth-first. Then the package itself is added.

        """
        assert isinstance(pkg_tuple, tuple)

        # Visit the node for the first (and only) time.
        assert pkg_tuple not in visited
        visited.add(pkg_tuple)

        # Ensure all dependencies are built. Sorted for stability
        for require in sorted(package_store.packages[pkg_tuple]['requires']):
            require_tuple = expand_require(require)

            # If the dependency has already been built, we can move on.
            if require_tuple in built:
                continue
            # If the dependency has not been built but has been visited, then
            # there's a cycle in the dependency graph.
            if require_tuple in visited:
                raise BuildError("Circular dependency. Circular link {0} -> {1}".format(pkg_tuple, require_tuple))

            if PackageId.is_id(require_tuple[0]):
                raise BuildError("Depending on a specific package id is not supported. Package {} "
                                 "depends on {}".format(pkg_tuple, require_tuple))

            if require_tuple not in package_store.packages:
                raise BuildError("Package {0} require {1} not buildable from tree.".format(pkg_tuple, require_tuple))

            # Add the dependency (after its dependencies, if any) to the build
            # order.
            visit(require_tuple)

        build_order.append(pkg_tuple)
        built.add(pkg_tuple)

    # Can't compare None to a string, so the sort key expands None-ness into a
    # boolean field, followed by the variant string ("" when the variant is None).
    def key_func(elem):
        return elem[0], elem[1] is None, elem[1] or ""

    def visit_packages(package_tuples):
        for pkg_tuple in sorted(package_tuples, key=key_func):
            if pkg_tuple in visited:
                continue
            visit(pkg_tuple)

    if tree_variant:
        package_sets = [package_store.get_package_set(tree_variant)]
    else:
        package_sets = package_store.get_all_package_sets()

    # Build all required packages for all tree variants.
    for package_set in package_sets:
        visit_packages(package_set.all_packages)

    built_packages = dict()
    for (name, variant) in build_order:
        print("Building: {} variant {}".format(name, pkgpanda.util.variant_str(variant)))
        built_packages.setdefault(name, dict())

        # Run the build, store the built package path for later use.
        # TODO(cmaloney): Only build the requested variants, rather than all variants.
        built_packages[name][variant] = build(
            package_store,
            name,
            variant,
            True)

    # Build bootstrap tarballs for all tree variants.
    def make_bootstrap(package_set):
        print("Making bootstrap variant:", pkgpanda.util.variant_name(package_set.variant))
        package_paths = list()
        for name, pkg_variant in package_set.bootstrap_packages:
            package_paths.append(built_packages[name][pkg_variant])

        if mkbootstrap:
            return make_bootstrap_tarball(
                package_store,
                list(sorted(package_paths)),
                package_set.variant)

    # Build bootstraps and package lists for all variants.
    # TODO(cmaloney): Allow distinguishing between "build all" and "build the default one".
    complete_cache_dir = package_store.get_complete_cache_dir()
    check_call(['mkdir', '-p', complete_cache_dir])
    results = {}
    for package_set in package_sets:
        info = {
            'bootstrap': make_bootstrap(package_set),
            'packages': sorted(
                load_string(package_store.get_last_build_filename(*pkg_tuple))
                for pkg_tuple in package_set.all_packages)}
        write_json(
            complete_cache_dir + '/' + pkgpanda.util.variant_prefix(package_set.variant) + 'complete.latest.json',
            info)
        results[package_set.variant] = info

    return results
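The visit/visit_packages pair above is a depth-first topological sort with cycle detection: a package is appended to the build order only after everything it requires. A self-contained sketch of the same pattern on a toy graph, assuming requires are plain name strings rather than (name, variant) tuples:

def build_order_for(packages):
    # packages: dict mapping a package name to the list of names it requires.
    order, visited, built = [], set(), set()

    def visit(name):
        assert name not in visited
        visited.add(name)
        for req in sorted(packages[name]):
            if req in built:
                continue
            if req in visited:
                # Visited but not built means we re-entered an open branch.
                raise ValueError('Circular dependency: {} -> {}'.format(name, req))
            visit(req)
        order.append(name)
        built.add(name)

    for name in sorted(packages):
        if name not in visited:
            visit(name)
    return order

# build_order_for({'dcos-image': ['mesos'], 'mesos': ['boost'], 'boost': []})
# -> ['boost', 'mesos', 'dcos-image']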
Code Example #33
def get_zk_pid():
    return load_string(zk_pid_path)
Code Example #34
File: test_upgrade_vpc.py  Project: cmaloney/dcos
def main():
    num_masters = int(os.getenv('MASTERS', '3'))
    num_agents = int(os.getenv('AGENTS', '2'))
    num_public_agents = int(os.getenv('PUBLIC_AGENTS', '1'))
    stack_name = 'upgrade-test-' + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))

    test_cmd = os.getenv('DCOS_PYTEST_CMD', 'py.test -vv -s -rs ' + os.getenv('CI_FLAGS', ''))

    stable_installer_url = os.environ['STABLE_INSTALLER_URL']
    installer_url = os.environ['INSTALLER_URL']

    vpc, ssh_info = test_util.aws.VpcCfStack.create(
        stack_name=stack_name,
        instance_type='m4.xlarge',
        instance_os='cent-os-7-dcos-prereqs',
        # An instance for each cluster node plus the bootstrap.
        instance_count=(num_masters + num_agents + num_public_agents + 1),
        admin_location='0.0.0.0/0',
        key_pair_name='default',
        boto_wrapper=test_util.aws.BotoWrapper(
            region=os.getenv('DEFAULT_AWS_REGION', 'eu-central-1'),
            aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
            aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),
        ),
    )
    vpc.wait_for_stack_creation()
    cluster = test_util.cluster.Cluster.from_vpc(
        vpc,
        ssh_info,
        ssh_key=load_string(os.getenv('DCOS_SSH_KEY_PATH', 'default_ssh_key')),
        num_masters=num_masters,
        num_agents=num_agents,
        num_public_agents=num_public_agents,
    )

    # Use the CLI installer to set exhibitor_storage_backend = zookeeper.
    test_util.cluster.install_dcos(cluster, stable_installer_url, api=False)

    master_list = [h.private_ip for h in cluster.masters]

    cluster_api = ClusterApi(
        'http://{ip}'.format(ip=cluster.masters[0].public_ip),
        master_list,
        master_list,
        [h.private_ip for h in cluster.agents],
        [h.private_ip for h in cluster.public_agents],
        "root",             # default_os_user
        web_auth_default_user=DcosUser(CI_AUTH_JSON),
        ca_cert_path=None)

    cluster_api.wait_for_dcos()

    # Deploy an app
    cluster_api.marathon.deploy_app(get_test_app())

    task_info_before_upgrade = get_task_info(cluster_api.marathon.get('v2/apps').json(),
                                             cluster_api.marathon.get('v2/tasks').json())

    assert task_info_before_upgrade is not None, "Unable to get task details of the cluster."
    assert task_info_before_upgrade.state == "TASK_RUNNING", "Task is not in the running state."

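    # Wipe the bootstrap host's home directory so the upgrade installer
    # starts from a clean slate.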
    with cluster.ssher.tunnel(cluster.bootstrap_host) as bootstrap_host_tunnel:
        bootstrap_host_tunnel.remote_cmd(['sudo', 'rm', '-rf', cluster.ssher.home_dir + '/*'])

    test_util.cluster.upgrade_dcos(cluster, installer_url)

    task_info_after_upgrade = get_task_info(cluster_api.marathon.get('v2/apps').json(),
                                            cluster_api.marathon.get('v2/tasks').json())

    assert task_info_after_upgrade is not None, "Unable to get task details of the cluster."
    assert task_info_after_upgrade.state == "TASK_RUNNING", "Task is not in the running state."

    assert task_info_before_upgrade.id == task_info_after_upgrade.id, \
        "Task ID before and after the upgrade did not match."

    # At least one health check must have succeeded in the upgraded cluster
    # since the last successful health check in the old cluster.
    assert (task_info_after_upgrade.last_success_time >
            task_info_before_upgrade.last_success_time + task_info_before_upgrade.health_check_interval), \
        "Invalid health-check for the task in the upgraded cluster."

    result = test_util.cluster.run_integration_tests(cluster, test_cmd=test_cmd)

    if result == 0:
        log.info("Test successful! Deleting the VPC created for this run...")
        vpc.delete()
    else:
        log.info("Test failed! The VPC cluster will remain available for debugging for 2 hours after instantiation.")
    sys.exit(result)
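
The get_task_info helper is not shown in this example. Below is a hypothetical reconstruction inferred from the call sites above: it must accept the /v2/apps and /v2/tasks JSON and return an object exposing id, state, last_success_time, and health_check_interval, with the last two numeric so the health-check assertion can do arithmetic. The Marathon field names used here are assumptions, not verified against the original helper:

from collections import namedtuple
from datetime import datetime

TaskInfo = namedtuple('TaskInfo', 'id state last_success_time health_check_interval')

def _to_epoch(timestamp):
    # Marathon reports ISO-8601 timestamps such as '2017-01-01T12:00:00.000Z'.
    return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ').timestamp()

def get_task_info(apps_json, tasks_json):
    tasks = tasks_json.get('tasks') or []
    apps = apps_json.get('apps') or []
    if not tasks or not apps:
        return None
    task = tasks[0]
    results = task.get('healthCheckResults') or []
    if not results:
        return None
    return TaskInfo(
        id=task['id'],
        state=task['state'],
        last_success_time=_to_epoch(results[0]['lastSuccess']),
        health_check_interval=apps[0]['healthChecks'][0]['intervalSeconds'],
    )
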
Code example #35
File: config.py Project: tamarrow/dcos
def calculate_ssh_private_key(ssh_private_key_filename):
    if ssh_private_key_filename == '':
        return launch.util.NO_TEST_FLAG
    return load_string(ssh_private_key_filename)
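
A quick usage sketch for the helper above, assuming launch.util is importable as in the original config.py; the key material is placeholder text, not a real key:

import tempfile

with tempfile.NamedTemporaryFile(mode='w', suffix='.pem', delete=False) as f:
    f.write('-----BEGIN FAKE KEY-----\nnot-a-real-key\n-----END FAKE KEY-----\n')
    key_path = f.name

assert calculate_ssh_private_key('') == launch.util.NO_TEST_FLAG  # empty filename means "no test"
print(calculate_ssh_private_key(key_path))  # echoes the file contents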