Code Example #1
def get_dcosconfig_source_target_and_templates(
        user_arguments: dict,
        extra_templates: List[str],
        extra_sources: List[gen.internals.Source]):
    log.info("Generating configuration files...")

    # TODO(cmaloney): Make these all just defined by the base calc.py
    config_package_names = ['dcos-config', 'dcos-metadata']
    template_filenames = ['dcos-config.yaml', 'cloud-config.yaml', 'dcos-metadata.yaml', 'dcos-services.yaml']

    # TODO(cmaloney): Check there are no duplicates between templates and extra_template_files
    template_filenames += extra_templates

    # Re-arrange templates to be indexed by common name. Only allow multiple for one key if the key
    # is yaml (ends in .yaml).
    templates = dict()
    for filename in template_filenames:
        key = os.path.basename(filename)
        templates.setdefault(key, list())
        templates[key].append(filename)

        if len(templates[key]) > 1 and not key.endswith('.yaml'):
            raise Exception(
                "Internal Error: Only know how to merge YAML templates at this point in time. "
                "Can't merge template {} in template_list {}".format(filename, templates[key]))

    targets = target_from_templates(templates)
    base_source = gen.internals.Source(is_user=False)
    base_source.add_entry(gen.calc.entry, replace_existing=False)

    if gen_extra_calc:
        base_source.add_entry(gen_extra_calc.entry, replace_existing=True)

    def add_builtin(name, value):
        base_source.add_must(name, json_prettyprint(value))

    sources = [base_source, user_arguments_to_source(user_arguments)] + extra_sources

    # Add builtin variables.
    # TODO(cmaloney): Hash the contents of all the templates rather than using the list of filenames
    # since the filenames might not live in this git repo, or may be locally modified.
    add_builtin('template_filenames', template_filenames)
    add_builtin('config_package_names', list(config_package_names))

    # Add placeholders for builtin variables whose values will be calculated after all others, so that we won't get
    # unset argument errors. The placeholder value will be replaced with the actual value after all other variables
    # are calculated.
    temporary_str = 'DO NOT USE THIS AS AN ARGUMENT TO OTHER ARGUMENTS. IT IS TEMPORARY'
    add_builtin('user_arguments_full', temporary_str)
    add_builtin('user_arguments', temporary_str)
    add_builtin('config_yaml_full', temporary_str)
    add_builtin('config_yaml', temporary_str)
    add_builtin('expanded_config', temporary_str)
    add_builtin('expanded_config_full', temporary_str)

    # Note: must come last so that the hash of the "base_source" this is being added to contains all the
    # variables except this one.
    add_builtin('sources_id', hash_checkout([hash_checkout(source.make_id()) for source in sources]))

    return sources, targets, templates
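
Every example on this page feeds nested dicts, lists, and strings into hash_checkout to derive a deterministic ID. The sketch below is only an illustration of that idea, not the actual pkgpanda/gen implementation: it canonicalizes the structure as JSON and hashes it with SHA-1.

import hashlib
import json


def hash_checkout_sketch(value):
    # Serialize the (possibly nested) structure deterministically, then SHA-1 it.
    # Assumption: values are JSON-serializable (str / list / dict of those).
    canonical = json.dumps(value, sort_keys=True, separators=(',', ':'))
    return hashlib.sha1(canonical.encode('utf-8')).hexdigest()


# Usage sketch, mirroring the sources_id calculation above (the source IDs are made up).
source_ids = [{'is_user': False, 'setters': []}, {'is_user': True, 'setters': []}]
sources_id = hash_checkout_sketch([hash_checkout_sketch(i) for i in source_ids])
print(sources_id)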
Code Example #2
File: internals.py Project: bernadinm/dcos
 def make_id(self):
     # {key: [hash_checkout(setter.make_id() for setter in setters)]
     #                 for key, setters in self.setters.items()}
     setter_ids = list()
     for setter_list in self.setters.values():
         for setter in setter_list:
             setter_ids.append(hash_checkout(setter.make_id()))
     return {
         'setters': setter_ids,
         'validate': [hash_checkout(function_id(fn)) for fn in self.validate],
         'is_user': self.is_user
     }
Code Example #3
 def make_id(self):
     # {key: [hash_checkout(setter.make_id() for setter in setters)]
     #                 for key, setters in self.setters.items()}
     setter_ids = list()
     for setter_list in self.setters.values():
         for setter in setter_list:
             setter_ids.append(hash_checkout(setter.make_id()))
     return {
         'setters': setter_ids,
         'validate':
         [hash_checkout(function_id(fn)) for fn in self.validate],
         'is_user': self.is_user
     }
Code Example #4
    def __init__(self, name: str, value: Union[str, Callable,
                                               Late], is_optional: bool,
                 conditions: List[Tuple[str, str]], is_user: bool):
        self.name = name
        self.is_optional = is_optional
        self.conditions = conditions
        self.is_user = is_user
        self._value_id = hash_checkout(value_id(value))

        def get_value():
            return value

        if isinstance(value, str):
            self.calc = get_value
            self.parameters = set()
            self.is_late = False
        elif isinstance(value, Late):
            self.calc = late_bound_raise
            self.parameters = set()
            self.is_late = True
            self.late_expression = value.expression
        else:
            assert callable(
                value), "{} should be a string or callable. Got: {}".format(
                    name, value)
            self.calc = value
            self.parameters = get_function_parameters(value)
            self.is_late = False
Code Example #5
File: internals.py Project: bernadinm/dcos
    def __init__(
            self,
            name: str,
            value: Union[str, Callable, Late],
            is_optional: bool,
            conditions: List[Tuple[str, str]],
            is_user: bool):
        self.name = name
        self.is_optional = is_optional
        self.conditions = conditions
        self.is_user = is_user
        self._value_id = hash_checkout(value_id(value))

        def get_value():
            return value

        if isinstance(value, str):
            self.calc = get_value
            self.parameters = set()
            self.is_late = False
        elif isinstance(value, Late):
            self.calc = late_bound_raise
            self.parameters = set()
            self.is_late = True
            self.late_expression = value.expression
        else:
            assert callable(value), "{} should be a string or callable. Got: {}".format(name, value)
            self.calc = value
            self.parameters = get_function_parameters(value)
            self.is_late = False
Code Example #6
File: __init__.py Project: dcos/dcos
def get_config_id(argument_dict: dict):
    """Return a unique ID for the configuration represented by argument_dict."""
    # dcos_image_commit and template_filenames should be included in argument_dict, but we reference them explicitly to
    # ensure that the config ID reflects the current commit and config templates.
    return hash_checkout({
        'commit': argument_dict['dcos_image_commit'],
        'template_filenames': argument_dict['template_filenames'],
        'argument_dict': argument_dict,
    })
Code Example #7
def get_config_id(argument_dict: dict):
    """Return a unique ID for the configuration represented by argument_dict."""
    # dcos_image_commit and template_filenames should be included in argument_dict, but we reference them explicitly to
    # ensure that the config ID reflects the current commit and config templates.
    return hash_checkout({
        'commit': argument_dict['dcos_image_commit'],
        'template_filenames': argument_dict['template_filenames'],
        'argument_dict': argument_dict,
    })
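
As a usage sketch of get_config_id, the argument dict only needs to include dcos_image_commit and template_filenames alongside everything else; the values below are hypothetical (in the real flow they are the JSON-encoded strings produced by add_builtin in Code Example #1).

example_arguments = {
    'dcos_image_commit': '0123456789abcdef0123456789abcdef01234567',
    'template_filenames': '["dcos-config.yaml", "cloud-config.yaml"]',
    'cluster_name': 'example-cluster',
}
config_id = get_config_id(example_arguments)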
Code Example #8
def make_bootstrap_tarball(package_store, packages, variant):
    # Convert filenames to package ids
    pkg_ids = list()
    for pkg_path in packages:
        # Get the package id from the given package path
        filename = os.path.basename(pkg_path)
        if not filename.endswith(".tar.xz"):
            raise BuildError("Packages must be packaged / end with a .tar.xz. Got {}".format(filename))
        pkg_id = filename[:-len(".tar.xz")]
        pkg_ids.append(pkg_id)

    bootstrap_cache_dir = package_store.get_bootstrap_cache_dir()

    # Filenames are <sha-1 of the package ids>.{active.json,bootstrap.tar.xz} in the bootstrap cache dir
    bootstrap_id = hash_checkout(pkg_ids)
    latest_name = "{}/{}bootstrap.latest".format(bootstrap_cache_dir, pkgpanda.util.variant_prefix(variant))

    output_name = bootstrap_cache_dir + '/' + bootstrap_id + '.'

    # bootstrap tarball = <sha1 of packages in tarball>.bootstrap.tar.xz
    bootstrap_name = "{}bootstrap.tar.xz".format(output_name)
    active_name = "{}active.json".format(output_name)

    def mark_latest():
        # Ensure latest is always written
        write_string(latest_name, bootstrap_id)

        print("bootstrap: {}".format(bootstrap_name))
        print("active: {}".format(active_name))
        print("latest: {}".format(latest_name))
        return bootstrap_id

    if (os.path.exists(bootstrap_name)):
        print("Bootstrap already up to date, not recreating")
        return mark_latest()

    check_call(['mkdir', '-p', bootstrap_cache_dir])

    # Try downloading.
    if package_store.try_fetch_bootstrap_and_active(bootstrap_id):
        print("Bootstrap already up to date, Not recreating. Downloaded from repository-url.")
        return mark_latest()

    print("Unable to download from cache. Building.")

    print("Creating bootstrap tarball for variant {}".format(variant))

    work_dir = tempfile.mkdtemp(prefix='mkpanda_bootstrap_tmp')

    def make_abs(path):
        return os.path.join(work_dir, path)

    pkgpanda_root = make_abs("opt/mesosphere")
    repository = Repository(os.path.join(pkgpanda_root, "packages"))

    # Fetch all the packages to the root
    for pkg_path in packages:
        filename = os.path.basename(pkg_path)
        pkg_id = filename[:-len(".tar.xz")]

        def local_fetcher(id, target):
            shutil.unpack_archive(pkg_path, target, "gztar")
        repository.add(local_fetcher, pkg_id, False)

    # Activate the packages inside the repository.
    # Do generate dcos.target.wants inside the root so that we don't
    # try messing with /etc/systemd/system.
    install = Install(
        root=pkgpanda_root,
        config_dir=None,
        rooted_systemd=True,
        manage_systemd=False,
        block_systemd=True,
        fake_path=True,
        skip_systemd_dirs=True,
        manage_users=False,
        manage_state_dir=False)
    install.activate(repository.load_packages(pkg_ids))

    # Mark the tarball as a bootstrap tarball/filesystem so that
    # dcos-setup.service will fire.
    make_file(make_abs("opt/mesosphere/bootstrap"))

    # Write out an active.json for the bootstrap tarball
    write_json(active_name, pkg_ids)

    # Rewrite all the symlinks to point to /opt/mesosphere
    rewrite_symlinks(work_dir, work_dir, "/")

    make_tar(bootstrap_name, pkgpanda_root)

    shutil.rmtree(work_dir)

    # Update latest last so that we don't ever use partially-built things.
    write_string(latest_name, bootstrap_id)

    print("Built bootstrap")
    return mark_latest()
Code Example #9
File: __init__.py Project: dcos/dcos
def generate(
        arguments,
        extra_templates=list(),
        extra_sources=list(),
        extra_targets=list()):
    # Maintain the old API, in which the parameter was named `arguments` rather than `user_arguments`.
    user_arguments = arguments
    arguments = None

    sources, targets, templates = get_dcosconfig_source_target_and_templates(
        user_arguments, extra_templates, extra_sources)

    resolver = validate_and_raise(sources, targets + extra_targets)
    argument_dict = get_final_arguments(resolver)
    late_variables = get_late_variables(resolver, sources)
    secret_builtins = ['expanded_config_full', 'user_arguments_full', 'config_yaml_full']
    secret_variables = set(get_secret_variables(sources) + secret_builtins)
    masked_value = '**HIDDEN**'

    # Calculate config ID after all variables are resolved, to make sure any change in config yields a new config ID.
    config_id = get_config_id(argument_dict)

    # Calculate values that depend on the config ID.
    config_package_names = json.loads(argument_dict['config_package_names'])
    package_ids = json.loads(argument_dict['package_ids'])
    config_package_ids = ['{}--setup_{}'.format(name, config_id) for name in config_package_names]
    cluster_packages = sorted(config_package_ids + package_ids)
    validate_cluster_packages(cluster_packages)
    cluster_package_list_id = hash_checkout(cluster_packages)

    # Calculate values for builtin variables.
    argument_dict['cluster_packages'] = json.dumps(cluster_packages)
    argument_dict['cluster_package_list_id'] = cluster_package_list_id
    user_arguments_masked = {k: (masked_value if k in secret_variables else v) for k, v in user_arguments.items()}
    argument_dict['user_arguments_full'] = json_prettyprint(user_arguments)
    argument_dict['user_arguments'] = json_prettyprint(user_arguments_masked)
    argument_dict['config_yaml_full'] = user_arguments_to_yaml(user_arguments)
    argument_dict['config_yaml'] = user_arguments_to_yaml(user_arguments_masked)

    # The expanded_config and expanded_config_full variables contain all other variables and their values.
    # expanded_config is a copy of expanded_config_full with secret values removed. Calculating these variables' values
    # must come after the calculation of all other variables to prevent infinite recursion.
    # TODO(cmaloney): Make this late-bound by gen.internals
    expanded_config_full = {
        k: v for k, v in argument_dict.items()
        # Omit late-bound variables whose values have not yet been calculated.
        if not v.startswith(gen.internals.LATE_BIND_PLACEHOLDER_START)
    }
    expanded_config_scrubbed = {k: v for k, v in expanded_config_full.items() if k not in secret_variables}
    argument_dict['expanded_config_full'] = format_expanded_config(expanded_config_full)
    argument_dict['expanded_config'] = format_expanded_config(expanded_config_scrubbed)

    log.debug(
        "Final arguments:" + json_prettyprint({
            # Mask secret config values.
            k: (masked_value if k in secret_variables else v) for k, v in argument_dict.items()
        })
    )

    # Fill in the template parameters
    # TODO(cmaloney): render_templates should ideally take the template targets.
    rendered_templates = render_templates(templates, argument_dict)

    # Validate there aren't any unexpected top level directives in any of the files
    # (likely indicates a misspelling)
    for name, template in rendered_templates.items():
        if name == dcos_services_yaml:  # yaml list of the service files
            assert isinstance(template, list)
        elif name == cloud_config_yaml:
            assert template.keys() <= CLOUDCONFIG_KEYS, template.keys()
        elif isinstance(template, str):  # Not a yaml template
            pass
        else:  # yaml template file
            log.debug("validating template file %s", name)
            assert template.keys() <= PACKAGE_KEYS, template.keys()

    stable_artifacts = []
    channel_artifacts = []

    # Find all files which contain late bind variables and turn them into a "late bind package"
    # TODO(cmaloney): check there are no late bound variables in cloud-config.yaml
    late_files, regular_files = extract_files_containing_late_variables(
        rendered_templates[dcos_config_yaml]['package'])
    # put the regular files right back
    rendered_templates[dcos_config_yaml] = {'package': regular_files}

    # Render cluster package list artifact.
    cluster_package_list_filename = 'package_lists/{}.package_list.json'.format(cluster_package_list_id)
    os.makedirs(os.path.dirname(cluster_package_list_filename), mode=0o755, exist_ok=True)
    write_string(cluster_package_list_filename, argument_dict['cluster_packages'])
    log.info('Cluster package list: {}'.format(cluster_package_list_filename))
    stable_artifacts.append(cluster_package_list_filename)

    def make_package_filename(package_id, extension):
        return 'packages/{0}/{1}{2}'.format(
            package_id.name,
            repr(package_id),
            extension)

    # Render all the cluster packages
    cluster_package_info = {}

    # Prepare late binding config, if any.
    late_package = build_late_package(late_files, config_id, argument_dict['provider'])
    if late_variables and late_package:
        # Render the late binding package. This package will be downloaded onto
        # each cluster node during bootstrap and rendered into the final config
        # using the values from the late config file.
        late_package_id = PackageId(late_package['name'])
        late_package_filename = make_package_filename(late_package_id, '.dcos_config')
        os.makedirs(os.path.dirname(late_package_filename), mode=0o755)
        write_yaml(late_package_filename, {'package': late_package['package']}, default_flow_style=False)
        log.info('Package filename: {}'.format(late_package_filename))
        stable_artifacts.append(late_package_filename)

        # Add the late config file to cloud config. The expressions in
        # late_variables will be resolved by the service handling the cloud
        # config (e.g. Amazon CloudFormation). The rendered late config file
        # on a cluster node's filesystem will contain the final values.
        rendered_templates[cloud_config_yaml]['root'].append({
            'path': config_dir + '/setup-flags/late-config.yaml',
            'permissions': '0644',
            'owner': 'root',
            # TODO(cmaloney): don't prettyprint to save bytes.
            # NOTE: Use yaml here simply to make avoiding painful escaping and
            # unescaping easier.
            'content': render_yaml({
                'late_bound_package_id': late_package['name'],
                'bound_values': late_variables
            })})

    # Collect metadata for cluster packages.
    for package_id_str in cluster_packages:
        package_id = PackageId(package_id_str)
        package_filename = make_package_filename(package_id, '.tar.xz')

        cluster_package_info[package_id.name] = {
            'id': package_id_str,
            'filename': package_filename
        }

    # Render config packages.
    for package_id_str in config_package_ids:
        package_id = PackageId(package_id_str)
        package_filename = cluster_package_info[package_id.name]['filename']
        do_gen_package(rendered_templates[package_id.name + '.yaml'], package_filename)
        stable_artifacts.append(package_filename)

    # Convert cloud-config to just contain write_files rather than root
    cc = rendered_templates[cloud_config_yaml]

    # Shouldn't contain any packages. Providers should pull what they need to
    # late bind out of other packages via cc_package_file.
    assert 'package' not in cc
    cc_root = cc.pop('root', [])
    # Make sure write_files exists.
    assert 'write_files' not in cc
    cc['write_files'] = []
    # Do the transform
    for item in cc_root:
        assert is_absolute_path(item['path'])
        cc['write_files'].append(item)
    rendered_templates[cloud_config_yaml] = cc

    # Add utils that need to be defined here so they can be bound to locals.
    def add_services(cloudconfig, cloud_init_implementation):
        return add_units(cloudconfig, rendered_templates[dcos_services_yaml], cloud_init_implementation)

    utils.add_services = add_services

    def add_stable_artifact(filename):
        assert filename not in stable_artifacts + channel_artifacts
        stable_artifacts.append(filename)

    utils.add_stable_artifact = add_stable_artifact

    def add_channel_artifact(filename):
        assert filename not in stable_artifacts + channel_artifacts
        channel_artifacts.append(filename)

    utils.add_channel_artifact = add_channel_artifact

    return Bunch({
        'arguments': argument_dict,
        'cluster_packages': cluster_package_info,
        'stable_artifacts': stable_artifacts,
        'channel_artifacts': channel_artifacts,
        'templates': rendered_templates,
        'utils': utils
    })
Code Example #10
File: __init__.py Project: bernadinm/dcos
def _build(package_store, name, variant, clean_after_build, recursive):
    assert isinstance(package_store, PackageStore)
    tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
    repository = Repository(tmpdir.name)

    package_dir = package_store.get_package_folder(name)

    def src_abs(name):
        return package_dir + '/' + name

    def cache_abs(filename):
        return package_store.get_package_cache_folder(name) + '/' + filename

    # Build pkginfo over time, translating fields from buildinfo.
    pkginfo = {}

    # Build up the docker command arguments over time, translating fields as needed.
    cmd = DockerCmd()

    assert (name, variant) in package_store.packages, \
        "Programming error: name, variant should have been validated to be valid before calling build()."

    builder = IdBuilder(package_store.get_buildinfo(name, variant))
    final_buildinfo = dict()

    builder.add('name', name)
    builder.add('variant', pkgpanda.util.variant_str(variant))

    # Convert single_source -> sources
    if builder.has('sources'):
        if builder.has('single_source'):
            raise BuildError('Both sources and single_source cannot be specified at the same time')
        sources = builder.take('sources')
    elif builder.has('single_source'):
        sources = {name: builder.take('single_source')}
        builder.replace('single_source', 'sources', sources)
    else:
        builder.add('sources', {})
        sources = dict()
        print("NOTICE: No sources specified")

    final_buildinfo['sources'] = sources

    # Construct the source fetchers, gather the checkout ids from them
    checkout_ids = dict()
    fetchers = dict()
    try:
        for src_name, src_info in sorted(sources.items()):
            # TODO(cmaloney): Switch to a unified top level cache directory shared by all packages
            cache_dir = package_store.get_package_cache_folder(name) + '/' + src_name
            check_call(['mkdir', '-p', cache_dir])
            fetcher = get_src_fetcher(src_info, cache_dir, package_dir)
            fetchers[src_name] = fetcher
            checkout_ids[src_name] = fetcher.get_id()
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    for src_name, checkout_id in checkout_ids.items():
        # NOTE: single_source buildinfo was expanded above so the src_name is
        # always correct here.
        # Make sure we never accidentally overwrite something which might be
        # important. Fields should match if specified (And that should be
        # tested at some point). For now, disallowing identical keys saves hassle.
        assert_no_duplicate_keys(checkout_id, final_buildinfo['sources'][src_name])
        final_buildinfo['sources'][src_name].update(checkout_id)

    # Add the sha1 of the buildinfo.json + build file to the build ids
    builder.update('sources', checkout_ids)
    build_script = src_abs(builder.take('build_script'))
    # TODO(cmaloney): Change dest name to build_script_sha1
    builder.replace('build_script', 'build', pkgpanda.util.sha1(build_script))
    builder.add('pkgpanda_version', pkgpanda.build.constants.version)

    extra_dir = src_abs("extra")
    # Add the "extra" folder inside the package as an additional source if it
    # exists
    if os.path.exists(extra_dir):
        extra_id = hash_folder_abs(extra_dir, package_dir)
        builder.add('extra_source', extra_id)
        final_buildinfo['extra_source'] = extra_id

    # Figure out the docker name.
    docker_name = builder.take('docker')
    cmd.container = docker_name

    # Add the id of the docker build environment to the build_ids.
    try:
        docker_id = get_docker_id(docker_name)
    except CalledProcessError:
        # docker pull the container and try again
        check_call(['docker', 'pull', docker_name])
        docker_id = get_docker_id(docker_name)

    builder.update('docker', docker_id)

    # TODO(cmaloney): The environment variables should be generated during build
    # not live in buildinfo.json.
    pkginfo['environment'] = builder.take('environment')

    # Whether pkgpanda should make sure a `/var/lib` state directory is available on the host
    pkginfo['state_directory'] = builder.take('state_directory')
    if pkginfo['state_directory'] not in [True, False]:
        raise BuildError("state_directory in buildinfo.json must be a boolean `true` or `false`")

    username = None
    if builder.has('username'):
        username = builder.take('username')
        if not isinstance(username, str):
            raise BuildError("username in buildinfo.json must be either not set (no user for this"
                             " package), or a user name string")
        try:
            pkgpanda.UserManagement.validate_username(username)
        except ValidationError as ex:
            raise BuildError("username in buildinfo.json didn't meet the validation rules. {}".format(ex))
        pkginfo['username'] = username

    group = None
    if builder.has('group'):
        group = builder.take('group')
        if not isinstance(group, str):
            raise BuildError("group in buildinfo.json must be either not set (use default group for this user)"
                             ", or group must be a string")
        try:
            pkgpanda.UserManagement.validate_group_name(group)
        except ValidationError as ex:
            raise BuildError("group in buildinfo.json didn't meet the validation rules. {}".format(ex))
        pkginfo['group'] = group

    # Packages need directories inside the fake install root (otherwise docker
    # will try making the directories on a readonly filesystem), so build the
    # install root now, and make the package directories in it as we go.
    install_dir = tempfile.mkdtemp(prefix="pkgpanda-")

    active_packages = list()
    active_package_ids = set()
    active_package_variants = dict()
    auto_deps = set()

    # Final package has the same requires as the build.
    requires = builder.take('requires')
    pkginfo['requires'] = requires

    if builder.has("sysctl"):
        pkginfo["sysctl"] = builder.take("sysctl")

    # TODO(cmaloney): Pull generating the full set of requires out into a function.
    to_check = copy.deepcopy(requires)
    if type(to_check) != list:
        raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
    while to_check:
        requires_info = to_check.pop(0)
        requires_name, requires_variant = expand_require(requires_info)

        if requires_name in active_package_variants:
            # TODO(cmaloney): If one package depends on the <default>
            # variant of a package and 1+ others depends on a non-<default>
            # variant then update the dependency to the non-default variant
            # rather than erroring.
            if requires_variant != active_package_variants[requires_name]:
                # TODO(cmaloney): Make this contain the chains of
                # dependencies which contain the conflicting packages.
                # a -> b -> c -> d {foo}
                # e {bar} -> d {baz}
                raise BuildError(
                    "Dependncy on multiple variants of the same package {}. variants: {} {}".format(
                        requires_name,
                        requires_variant,
                        active_package_variants[requires_name]))

            # The package {requires_name, variant} is already a dependency;
            # don't process it again / move on to the next.
            continue

        active_package_variants[requires_name] = requires_variant

        # Figure out the last build of the dependency, add that as the
        # fully expanded dependency.
        requires_last_build = package_store.get_last_build_filename(requires_name, requires_variant)
        if not os.path.exists(requires_last_build):
            if recursive:
                # Build the dependency
                build(package_store, requires_name, requires_variant, clean_after_build, recursive)
            else:
                raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
                                 "the dependency".format(requires_name, requires_variant))

        try:
            pkg_id_str = load_string(requires_last_build)
            auto_deps.add(pkg_id_str)
            pkg_buildinfo = package_store.get_buildinfo(requires_name, requires_variant)
            pkg_requires = pkg_buildinfo['requires']
            pkg_path = repository.package_path(pkg_id_str)
            pkg_tar = pkg_id_str + '.tar.xz'
            if not os.path.exists(package_store.get_package_cache_folder(requires_name) + '/' + pkg_tar):
                raise BuildError(
                    "The build tarball {} refered to by the last_build file of the dependency {} "
                    "variant {} doesn't exist. Rebuild the dependency.".format(
                        pkg_tar,
                        requires_name,
                        requires_variant))

            active_package_ids.add(pkg_id_str)

            # Mount the package into the docker container.
            cmd.volumes[pkg_path] = "/opt/mesosphere/packages/{}:ro".format(pkg_id_str)
            os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))

            # Add the dependencies of the package to the set which will be
            # activated.
            # TODO(cmaloney): All these 'transitive' dependencies shouldn't
            # be available to the package being built, only what depends on
            # them directly.
            to_check += pkg_requires
        except ValidationError as ex:
            raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
        except PackageError as ex:
            raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex

    # Add requires to the package id, calculate the final package id.
    # NOTE: active_packages isn't fully constructed here since we lazily load
    # packages not already in the repository.
    builder.update('requires', list(active_package_ids))
    version_extra = None
    if builder.has('version_extra'):
        version_extra = builder.take('version_extra')

    build_ids = builder.get_build_ids()
    version_base = hash_checkout(build_ids)
    version = None
    if builder.has('version_extra'):
        version = "{0}-{1}".format(version_extra, version_base)
    else:
        version = version_base
    pkg_id = PackageId.from_parts(name, version)

    # Everything must have been extracted by now. If it wasn't, then we would
    # have hit a hard error that it was set but not used, and it wouldn't have
    # been included in the calculation of the PackageId.
    builder = None

    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
    final_buildinfo['build_ids'] = build_ids
    final_buildinfo['package_version'] = version

    # Save the package name and variant. The variant is used when installing
    # packages to validate dependencies.
    final_buildinfo['name'] = name
    final_buildinfo['variant'] = variant

    # If the package is already built, don't do anything.
    pkg_path = package_store.get_package_cache_folder(name) + '/{}.tar.xz'.format(pkg_id)

    # Done if it exists locally
    if exists(pkg_path):
        print("Package up to date. Not re-building.")

        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))

        return pkg_path

    # Try downloading.
    dl_path = package_store.try_fetch_by_id(pkg_id)
    if dl_path:
        print("Package up to date. Not re-building. Downloaded from repository-url.")
        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
        print(dl_path, pkg_path)
        assert dl_path == pkg_path
        return pkg_path

    # Fall out and do the build since it couldn't be downloaded
    print("Unable to download from cache. Proceeding to build")

    print("Building package {} with buildinfo: {}".format(
        pkg_id,
        json.dumps(final_buildinfo, indent=2, sort_keys=True)))

    # Clean out src, result so later steps can use them freely for building.
    def clean():
        # Run a docker container to remove src/ and result/
        cmd = DockerCmd()
        cmd.volumes = {
            package_store.get_package_cache_folder(name): "/pkg/:rw",
        }
        cmd.container = "ubuntu:14.04.4"
        cmd.run("package-cleaner", ["rm", "-rf", "/pkg/src", "/pkg/result"])

    clean()

    # Only fresh builds are allowed which don't overlap existing artifacts.
    result_dir = cache_abs("result")
    if exists(result_dir):
        raise BuildError("result folder must not exist. It will be made when the package is "
                         "built. {}".format(result_dir))

    # 'mkpanda add' all implicit dependencies since we actually need to build.
    for dep in auto_deps:
        print("Auto-adding dependency: {}".format(dep))
        # NOTE: Not using the name pkg_id because that would shadow the outer one.
        id_obj = PackageId(dep)
        add_package_file(repository, package_store.get_package_path(id_obj))
        package = repository.load(dep)
        active_packages.append(package)

    # Check out all the sources into their respective 'src/' folders.
    try:
        src_dir = cache_abs('src')
        if os.path.exists(src_dir):
            raise ValidationError(
                "'src' directory already exists, did you have a previous build? " +
                "Currently all builds must be from scratch. Support should be " +
                "added for re-using a src directory when possible. src={}".format(src_dir))
        os.mkdir(src_dir)
        for src_name, fetcher in sorted(fetchers.items()):
            root = cache_abs('src/' + src_name)
            os.mkdir(root)

            fetcher.checkout_to(root)
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    # Activate the packages so that we have a proper path, environment
    # variables.
    # TODO(cmaloney): RAII type thing for the temporary directory so if we
    # don't get all the way through things will be cleaned up?
    install = Install(
        root=install_dir,
        config_dir=None,
        rooted_systemd=True,
        manage_systemd=False,
        block_systemd=True,
        fake_path=True,
        manage_users=False,
        manage_state_dir=False)
    install.activate(active_packages)
    # Rewrite all the symlinks inside the active path because we will
    # be mounting the folder into a docker container, and the absolute
    # paths to the packages will change.
    # TODO(cmaloney): This isn't very clean, it would be much nicer to
    # just run pkgpanda inside the package.
    rewrite_symlinks(install_dir, repository.path, "/opt/mesosphere/packages/")

    print("Building package in docker")

    # TODO(cmaloney): Run as a specific non-root user, make it possible
    # for non-root to cleanup afterwards.
    # Run the build, prepping the environment as necessary.
    mkdir(cache_abs("result"))

    # Copy the build info to the resulting tarball
    write_json(cache_abs("src/buildinfo.full.json"), final_buildinfo)
    write_json(cache_abs("result/buildinfo.full.json"), final_buildinfo)

    write_json(cache_abs("result/pkginfo.json"), pkginfo)

    # Make the folder for the package we are building. If docker does it, it
    # gets auto-created with root permissions and we can't actually delete it.
    os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))

    # TODO(cmaloney): Disallow writing to well known files and directories?
    # Source we checked out
    cmd.volumes.update({
        # TODO(cmaloney): src should be read only...
        cache_abs("src"): "/pkg/src:rw",
        # The build script
        build_script: "/pkg/build:ro",
        # Getting the result out
        cache_abs("result"): "/opt/mesosphere/packages/{}:rw".format(pkg_id),
        install_dir: "/opt/mesosphere:ro"
    })

    if os.path.exists(extra_dir):
        cmd.volumes[extra_dir] = "/pkg/extra:ro"

    cmd.environment = {
        "PKG_VERSION": version,
        "PKG_NAME": name,
        "PKG_ID": pkg_id,
        "PKG_PATH": "/opt/mesosphere/packages/{}".format(pkg_id),
        "PKG_VARIANT": variant if variant is not None else "<default>",
        "NUM_CORES": multiprocessing.cpu_count()
    }

    try:
        # TODO(cmaloney): Run a wrapper which sources
        # /opt/mesosphere/environment then runs a build. Also should fix
        # ownership of /opt/mesosphere/packages/{pkg_id} post build.
        cmd.run("package-builder", [
            "/bin/bash",
            "-o", "nounset",
            "-o", "pipefail",
            "-o", "errexit",
            "/pkg/build"])
    except CalledProcessError as ex:
        raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))

    # Clean up the temporary install dir used for dependencies.
    # TODO(cmaloney): Move to an RAII wrapper.
    check_call(['rm', '-rf', install_dir])

    with logger.scope("Build package tarball"):
        # Check for forbidden services before packaging the tarball:
        try:
            check_forbidden_services(cache_abs("result"), RESERVED_UNIT_NAMES)
        except ValidationError as ex:
            raise BuildError("Package validation failed: {}".format(ex))

        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))

    # Bundle the artifacts into the pkgpanda package
    tmp_name = pkg_path + "-tmp.tar.xz"
    make_tar(tmp_name, cache_abs("result"))
    os.rename(tmp_name, pkg_path)
    print("Package built.")
    if clean_after_build:
        clean()
    return pkg_path
Code Example #11
File: __init__.py Project: bernadinm/dcos
def make_bootstrap_tarball(package_store, packages, variant):
    # Convert filenames to package ids
    pkg_ids = list()
    for pkg_path in packages:
        # Get the package id from the given package path
        filename = os.path.basename(pkg_path)
        if not filename.endswith(".tar.xz"):
            raise BuildError("Packages must be packaged / end with a .tar.xz. Got {}".format(filename))
        pkg_id = filename[:-len(".tar.xz")]
        pkg_ids.append(pkg_id)

    bootstrap_cache_dir = package_store.get_bootstrap_cache_dir()

    # Filenames are <sha-1 of the package ids>.{active.json,bootstrap.tar.xz} in the bootstrap cache dir
    bootstrap_id = hash_checkout(pkg_ids)
    latest_name = "{}/{}bootstrap.latest".format(bootstrap_cache_dir, pkgpanda.util.variant_prefix(variant))

    output_name = bootstrap_cache_dir + '/' + bootstrap_id + '.'

    # bootstrap tarball = <sha1 of packages in tarball>.bootstrap.tar.xz
    bootstrap_name = "{}bootstrap.tar.xz".format(output_name)
    active_name = "{}active.json".format(output_name)

    def mark_latest():
        # Ensure latest is always written
        write_string(latest_name, bootstrap_id)

        print("bootstrap: {}".format(bootstrap_name))
        print("active: {}".format(active_name))
        print("latest: {}".format(latest_name))
        return bootstrap_id

    if (os.path.exists(bootstrap_name)):
        print("Bootstrap already up to date, not recreating")
        return mark_latest()

    check_call(['mkdir', '-p', bootstrap_cache_dir])

    # Try downloading.
    if package_store.try_fetch_bootstrap_and_active(bootstrap_id):
        print("Bootstrap already up to date, Not recreating. Downloaded from repository-url.")
        return mark_latest()

    print("Unable to download from cache. Building.")

    print("Creating bootstrap tarball for variant {}".format(variant))

    work_dir = tempfile.mkdtemp(prefix='mkpanda_bootstrap_tmp')

    def make_abs(path):
        return os.path.join(work_dir, path)

    pkgpanda_root = make_abs("opt/mesosphere")
    repository = Repository(os.path.join(pkgpanda_root, "packages"))

    # Fetch all the packages to the root
    for pkg_path in packages:
        filename = os.path.basename(pkg_path)
        pkg_id = filename[:-len(".tar.xz")]

        def local_fetcher(id, target):
            shutil.unpack_archive(pkg_path, target, "gztar")
        repository.add(local_fetcher, pkg_id, False)

    # Activate the packages inside the repository.
    # Do generate dcos.target.wants inside the root so that we don't
    # try messing with /etc/systemd/system.
    install = Install(
        root=pkgpanda_root,
        config_dir=None,
        rooted_systemd=True,
        manage_systemd=False,
        block_systemd=True,
        fake_path=True,
        skip_systemd_dirs=True,
        manage_users=False,
        manage_state_dir=False)
    install.activate(repository.load_packages(pkg_ids))

    # Mark the tarball as a bootstrap tarball/filesystem so that
    # dcos-setup.service will fire.
    make_file(make_abs("opt/mesosphere/bootstrap"))

    # Write out an active.json for the bootstrap tarball
    write_json(active_name, pkg_ids)

    # Rewrite all the symlinks to point to /opt/mesosphere
    rewrite_symlinks(work_dir, work_dir, "/")

    make_tar(bootstrap_name, pkgpanda_root)

    shutil.rmtree(work_dir)

    # Update latest last so that we don't ever use partially-built things.
    write_string(latest_name, bootstrap_id)

    print("Built bootstrap")
    return mark_latest()
Code Example #12
File: __init__.py Project: bernadinm/dcos
def hash_folder(directory):
    return hash_checkout(hash_files_in_folder(directory))
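
A usage sketch (the path is hypothetical): the returned ID changes whenever the contents of any file under the directory change.

extra_id = hash_folder('/path/to/package/extra')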
Code Example #13
File: calc.py Project: pologood/dcos
def calculate_config_id(dcos_image_commit, template_filenames, sources_id):
    return hash_checkout({
        "commit": dcos_image_commit,
        "template_filenames": json.loads(template_filenames),
        "sources_id": sources_id
    })
Code Example #14
File: calc.py Project: hatred/dcos
def calculate_config_id(dcos_image_commit, template_filenames, sources_id):
    return hash_checkout({
        "commit": dcos_image_commit,
        "template_filenames": json.loads(template_filenames),
        "sources_id": sources_id})
Code Example #15
def _build(package_store, name, variant, clean_after_build, recursive):
    assert isinstance(package_store, PackageStore)
    tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
    repository = Repository(tmpdir.name)

    package_dir = package_store.get_package_folder(name)

    def src_abs(name):
        return package_dir + '/' + name

    def cache_abs(filename):
        return package_store.get_package_cache_folder(name) + '/' + filename

    # Build pkginfo over time, translating fields from buildinfo.
    pkginfo = {}

    # Build up the docker command arguments over time, translating fields as needed.
    cmd = DockerCmd()

    assert (name, variant) in package_store.packages, \
        "Programming error: name, variant should have been validated to be valid before calling build()."

    builder = IdBuilder(package_store.get_buildinfo(name, variant))
    final_buildinfo = dict()

    builder.add('name', name)
    builder.add('variant', pkgpanda.util.variant_str(variant))

    # Convert single_source -> sources
    if builder.has('sources'):
        if builder.has('single_source'):
            raise BuildError('Both sources and single_source cannot be specified at the same time')
        sources = builder.take('sources')
    elif builder.has('single_source'):
        sources = {name: builder.take('single_source')}
        builder.replace('single_source', 'sources', sources)
    else:
        builder.add('sources', {})
        sources = dict()
        print("NOTICE: No sources specified")

    final_buildinfo['sources'] = sources

    # Construct the source fetchers, gather the checkout ids from them
    checkout_ids = dict()
    fetchers = dict()
    try:
        for src_name, src_info in sorted(sources.items()):
            # TODO(cmaloney): Switch to a unified top level cache directory shared by all packages
            cache_dir = package_store.get_package_cache_folder(name) + '/' + src_name
            check_call(['mkdir', '-p', cache_dir])
            fetcher = get_src_fetcher(src_info, cache_dir, package_dir)
            fetchers[src_name] = fetcher
            checkout_ids[src_name] = fetcher.get_id()
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    for src_name, checkout_id in checkout_ids.items():
        # NOTE: single_source buildinfo was expanded above so the src_name is
        # always correct here.
        # Make sure we never accidentally overwrite something which might be
        # important. Fields should match if specified (And that should be
        # tested at some point). For now, disallowing identical keys saves hassle.
        assert_no_duplicate_keys(checkout_id, final_buildinfo['sources'][src_name])
        final_buildinfo['sources'][src_name].update(checkout_id)

    # Add the sha1 of the buildinfo.json + build file to the build ids
    builder.update('sources', checkout_ids)
    build_script = src_abs(builder.take('build_script'))
    # TODO(cmaloney): Change dest name to build_script_sha1
    builder.replace('build_script', 'build', pkgpanda.util.sha1(build_script))
    builder.add('pkgpanda_version', pkgpanda.build.constants.version)

    extra_dir = src_abs("extra")
    # Add the "extra" folder inside the package as an additional source if it
    # exists
    if os.path.exists(extra_dir):
        extra_id = hash_folder_abs(extra_dir, package_dir)
        builder.add('extra_source', extra_id)
        final_buildinfo['extra_source'] = extra_id

    # Figure out the docker name.
    docker_name = builder.take('docker')
    cmd.container = docker_name

    # Add the id of the docker build environment to the build_ids.
    try:
        docker_id = get_docker_id(docker_name)
    except CalledProcessError:
        # docker pull the container and try again
        check_call(['docker', 'pull', docker_name])
        docker_id = get_docker_id(docker_name)

    builder.update('docker', docker_id)

    # TODO(cmaloney): The environment variables should be generated during build
    # not live in buildinfo.json.
    pkginfo['environment'] = builder.take('environment')

    # Whether pkgpanda should make sure a `/var/lib` state directory is available on the host
    pkginfo['state_directory'] = builder.take('state_directory')
    if pkginfo['state_directory'] not in [True, False]:
        raise BuildError("state_directory in buildinfo.json must be a boolean `true` or `false`")

    username = None
    if builder.has('username'):
        username = builder.take('username')
        if not isinstance(username, str):
            raise BuildError("username in buildinfo.json must be either not set (no user for this"
                             " package), or a user name string")
        try:
            pkgpanda.UserManagement.validate_username(username)
        except ValidationError as ex:
            raise BuildError("username in buildinfo.json didn't meet the validation rules. {}".format(ex))
        pkginfo['username'] = username

    group = None
    if builder.has('group'):
        group = builder.take('group')
        if not isinstance(group, str):
            raise BuildError("group in buildinfo.json must be either not set (use default group for this user)"
                             ", or group must be a string")
        try:
            pkgpanda.UserManagement.validate_group_name(group)
        except ValidationError as ex:
            raise BuildError("group in buildinfo.json didn't meet the validation rules. {}".format(ex))
        pkginfo['group'] = group

    # Packages need directories inside the fake install root (otherwise docker
    # will try making the directories on a readonly filesystem), so build the
    # install root now, and make the package directories in it as we go.
    install_dir = tempfile.mkdtemp(prefix="pkgpanda-")

    active_packages = list()
    active_package_ids = set()
    active_package_variants = dict()
    auto_deps = set()

    # Final package has the same requires as the build.
    requires = builder.take('requires')
    pkginfo['requires'] = requires

    if builder.has("sysctl"):
        pkginfo["sysctl"] = builder.take("sysctl")

    # TODO(cmaloney): Pull generating the full set of requires out into a function.
    to_check = copy.deepcopy(requires)
    if type(to_check) != list:
        raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
    while to_check:
        requires_info = to_check.pop(0)
        requires_name, requires_variant = expand_require(requires_info)

        if requires_name in active_package_variants:
            # TODO(cmaloney): If one package depends on the <default>
            # variant of a package and 1+ others depends on a non-<default>
            # variant then update the dependency to the non-default variant
            # rather than erroring.
            if requires_variant != active_package_variants[requires_name]:
                # TODO(cmaloney): Make this contain the chains of
                # dependencies which contain the conflicting packages.
                # a -> b -> c -> d {foo}
                # e {bar} -> d {baz}
                raise BuildError(
                    "Dependncy on multiple variants of the same package {}. variants: {} {}".format(
                        requires_name,
                        requires_variant,
                        active_package_variants[requires_name]))

            # The package {requires_name, variant} is already a dependency;
            # don't process it again / move on to the next.
            continue

        active_package_variants[requires_name] = requires_variant

        # Figure out the last build of the dependency, add that as the
        # fully expanded dependency.
        requires_last_build = package_store.get_last_build_filename(requires_name, requires_variant)
        if not os.path.exists(requires_last_build):
            if recursive:
                # Build the dependency
                build(package_store, requires_name, requires_variant, clean_after_build, recursive)
            else:
                raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
                                 "the dependency".format(requires_name, requires_variant))

        try:
            pkg_id_str = load_string(requires_last_build)
            auto_deps.add(pkg_id_str)
            pkg_buildinfo = package_store.get_buildinfo(requires_name, requires_variant)
            pkg_requires = pkg_buildinfo['requires']
            pkg_path = repository.package_path(pkg_id_str)
            pkg_tar = pkg_id_str + '.tar.xz'
            if not os.path.exists(package_store.get_package_cache_folder(requires_name) + '/' + pkg_tar):
                raise BuildError(
                    "The build tarball {} refered to by the last_build file of the dependency {} "
                    "variant {} doesn't exist. Rebuild the dependency.".format(
                        pkg_tar,
                        requires_name,
                        requires_variant))

            active_package_ids.add(pkg_id_str)

            # Mount the package into the docker container.
            cmd.volumes[pkg_path] = "/opt/mesosphere/packages/{}:ro".format(pkg_id_str)
            os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))

            # Add the dependencies of the package to the set which will be
            # activated.
            # TODO(cmaloney): All these 'transitive' dependencies shouldn't
            # be available to the package being built, only what depends on
            # them directly.
            to_check += pkg_requires
        except ValidationError as ex:
            raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
        except PackageError as ex:
            raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex

    # Add requires to the package id, calculate the final package id.
    # NOTE: active_packages isn't fully constructed here since we lazily load
    # packages not already in the repository.
    builder.update('requires', list(active_package_ids))
    version_extra = None
    if builder.has('version_extra'):
        version_extra = builder.take('version_extra')

    build_ids = builder.get_build_ids()
    version_base = hash_checkout(build_ids)
    version = None
    if builder.has('version_extra'):
        version = "{0}-{1}".format(version_extra, version_base)
    else:
        version = version_base
    pkg_id = PackageId.from_parts(name, version)

    # Everything must have been extracted by now. If it wasn't, then we would
    # have hit a hard error that it was set but not used, and it wouldn't have
    # been included in the calculation of the PackageId.
    builder = None

    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
    final_buildinfo['build_ids'] = build_ids
    final_buildinfo['package_version'] = version

    # Save the package name and variant. The variant is used when installing
    # packages to validate dependencies.
    final_buildinfo['name'] = name
    final_buildinfo['variant'] = variant

    # If the package is already built, don't do anything.
    pkg_path = package_store.get_package_cache_folder(name) + '/{}.tar.xz'.format(pkg_id)

    # Done if it exists locally
    if exists(pkg_path):
        print("Package up to date. Not re-building.")

        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))

        return pkg_path

    # Try downloading.
    dl_path = package_store.try_fetch_by_id(pkg_id)
    if dl_path:
        print("Package up to date. Not re-building. Downloaded from repository-url.")
        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
        print(dl_path, pkg_path)
        assert dl_path == pkg_path
        return pkg_path

    # Fall out and do the build since it couldn't be downloaded
    print("Unable to download from cache. Proceeding to build")

    print("Building package {} with buildinfo: {}".format(
        pkg_id,
        json.dumps(final_buildinfo, indent=2, sort_keys=True)))

    # Clean out src, result so later steps can use them freely for building.
    def clean():
        # Run a docker container to remove src/ and result/
        cmd = DockerCmd()
        cmd.volumes = {
            package_store.get_package_cache_folder(name): "/pkg/:rw",
        }
        cmd.container = "ubuntu:14.04.4"
        cmd.run("package-cleaner", ["rm", "-rf", "/pkg/src", "/pkg/result"])

    clean()

    # Only fresh builds are allowed which don't overlap existing artifacts.
    result_dir = cache_abs("result")
    if exists(result_dir):
        raise BuildError("result folder must not exist. It will be made when the package is "
                         "built. {}".format(result_dir))

    # 'mkpanda add' all implicit dependencies since we actually need to build.
    for dep in auto_deps:
        print("Auto-adding dependency: {}".format(dep))
        # NOTE: Not using the name pkg_id because that would shadow the outer one.
        id_obj = PackageId(dep)
        add_package_file(repository, package_store.get_package_path(id_obj))
        package = repository.load(dep)
        active_packages.append(package)

    # Check out all the sources into their respective 'src/' folders.
    try:
        src_dir = cache_abs('src')
        if os.path.exists(src_dir):
            raise ValidationError(
                "'src' directory already exists, did you have a previous build? " +
                "Currently all builds must be from scratch. Support should be " +
                "added for re-using a src directory when possible. src={}".format(src_dir))
        os.mkdir(src_dir)
        for src_name, fetcher in sorted(fetchers.items()):
            root = cache_abs('src/' + src_name)
            os.mkdir(root)

            fetcher.checkout_to(root)
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    # Activate the packages so that we have a proper path and environment
    # variables.
    # TODO(cmaloney): RAII-type thing for the temporary directory so that if we
    # don't get all the way through, things will be cleaned up?
    install = Install(
        root=install_dir,
        config_dir=None,
        rooted_systemd=True,
        manage_systemd=False,
        block_systemd=True,
        fake_path=True,
        manage_users=False,
        manage_state_dir=False)
    install.activate(active_packages)
    # Rewrite all the symlinks inside the active path because we will
    # be mounting the folder into a docker container, and the absolute
    # paths to the packages will change.
    # TODO(cmaloney): This isn't very clean; it would be much nicer to
    # just run pkgpanda inside the package.
    rewrite_symlinks(install_dir, repository.path, "/opt/mesosphere/packages/")

    print("Building package in docker")

    # TODO(cmaloney): Run as a specific non-root user, and make it possible
    # for non-root to clean up afterwards.
    # Run the build, prepping the environment as necessary.
    mkdir(cache_abs("result"))

    # Copy the build info to the resulting tarball
    write_json(cache_abs("src/buildinfo.full.json"), final_buildinfo)
    write_json(cache_abs("result/buildinfo.full.json"), final_buildinfo)

    write_json(cache_abs("result/pkginfo.json"), pkginfo)

    # Make the folder for the package we are building. If docker does it, it
    # gets auto-created with root permissions and we can't actually delete it.
    os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))

    # TODO(cmaloney): Disallow writing to well-known files and directories?
    # Source we checked out
    cmd.volumes.update({
        # TODO(cmaloney): src should be read only...
        cache_abs("src"): "/pkg/src:rw",
        # The build script
        build_script: "/pkg/build:ro",
        # Getting the result out
        cache_abs("result"): "/opt/mesosphere/packages/{}:rw".format(pkg_id),
        install_dir: "/opt/mesosphere:ro"
    })

    if os.path.exists(extra_dir):
        cmd.volumes[extra_dir] = "/pkg/extra:ro"

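    # These variables are the interface the /pkg/build script sees inside the
    # build container.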
    cmd.environment = {
        "PKG_VERSION": version,
        "PKG_NAME": name,
        "PKG_ID": pkg_id,
        "PKG_PATH": "/opt/mesosphere/packages/{}".format(pkg_id),
        "PKG_VARIANT": variant if variant is not None else "<default>",
        "NUM_CORES": multiprocessing.cpu_count()
    }

    try:
        # TODO(cmaloney): Run a wrapper which sources
        # /opt/mesosphere/environment then runs a build. Also should fix
        # ownership of /opt/mesosphere/packages/{pkg_id} post build.
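        # The bash options below make the build fail fast: nounset treats unset
        # variables as errors, pipefail propagates failures through pipelines,
        # and errexit aborts on the first failing command.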
        cmd.run("package-builder", [
            "/bin/bash",
            "-o", "nounset",
            "-o", "pipefail",
            "-o", "errexit",
            "/pkg/build"])
    except CalledProcessError as ex:
        raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))

    # Clean up the temporary install dir used for dependencies.
    # TODO(cmaloney): Move to an RAII wrapper.
    check_call(['rm', '-rf', install_dir])

    with logger.scope("Build package tarball"):
        # Check for forbidden services before packaging the tarball:
        try:
            check_forbidden_services(cache_abs("result"), RESERVED_UNIT_NAMES)
        except ValidationError as ex:
            raise BuildError("Package validation failed: {}".format(ex))

        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))

    # Bundle the artifacts into the pkgpanda package
    tmp_name = pkg_path + "-tmp.tar.xz"
    make_tar(tmp_name, cache_abs("result"))
    os.rename(tmp_name, pkg_path)
    print("Package built.")
    if clean_after_build:
        clean()
    return pkg_path
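
# A minimal, self-contained sketch (not the pkgpanda make_tar implementation) of
# the "write to a temporary name, then rename" pattern used above when the
# package tarball is published: os.rename() is atomic on POSIX filesystems, so a
# half-written archive can never be mistaken for a finished package.
import os
import tarfile


def publish_tarball(result_dir, pkg_path):
    tmp_name = pkg_path + "-tmp.tar.xz"
    with tarfile.open(tmp_name, "w:xz") as tar:
        tar.add(result_dir, arcname=".")
    os.rename(tmp_name, pkg_path)
    return pkg_path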
Code example #16
0
def hash_folder(directory):
    return hash_checkout(hash_files_in_folder(directory))
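
# Illustrative only: a self-contained approximation, using hashlib, of what a
# folder-content hash like the one above conceptually computes. The real
# hash_files_in_folder / hash_checkout helpers from pkgpanda are not reproduced.
import hashlib
import os


def hash_folder_sketch(directory):
    digest = hashlib.sha1()
    for root, _, files in sorted(os.walk(directory), key=lambda entry: entry[0]):
        for filename in sorted(files):
            path = os.path.join(root, filename)
            # Hash the relative path as well as the bytes so renames change the hash.
            digest.update(os.path.relpath(path, directory).encode())
            with open(path, "rb") as handle:
                digest.update(handle.read())
    return digest.hexdigest()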
Code example #17
0
File: __init__.py    Project: zhous1q/dcos
def generate(arguments,
             extra_templates=list(),
             extra_sources=list(),
             extra_targets=list()):
    # Keep the old API, which took the parameter name 'arguments', while using the new name 'user_arguments' internally.
    user_arguments = arguments
    arguments = None

    sources, targets, templates = get_dcosconfig_source_target_and_templates(
        user_arguments, extra_templates, extra_sources)

    resolver = validate_and_raise(sources, targets + extra_targets)
    argument_dict = get_final_arguments(resolver)
    late_variables = get_late_variables(resolver, sources)
    secret_builtins = [
        'expanded_config_full', 'user_arguments_full', 'config_yaml_full'
    ]
    secret_variables = set(get_secret_variables(sources) + secret_builtins)
    masked_value = '**HIDDEN**'

    # Calculate config ID after all variables are resolved, to make sure any change in config yields a new config ID.
    config_id = get_config_id(argument_dict)

    # Calculate values that depend on the config ID.
    config_package_names = json.loads(argument_dict['config_package_names'])
    package_ids = json.loads(argument_dict['package_ids'])
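    # Config packages are named '<name>--setup_<config_id>', e.g.
    # 'dcos-config--setup_<config_id>', so any configuration change yields new
    # config package IDs.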
    config_package_ids = [
        '{}--setup_{}'.format(name, config_id) for name in config_package_names
    ]
    cluster_packages = sorted(config_package_ids + package_ids)
    validate_cluster_packages(cluster_packages)
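    # The list id is a content hash of the sorted package list; it names the
    # package_lists/<id>.package_list.json artifact rendered below.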
    cluster_package_list_id = hash_checkout(cluster_packages)

    # Calculate values for builtin variables.
    argument_dict['cluster_packages'] = json.dumps(cluster_packages)
    argument_dict['cluster_package_list_id'] = cluster_package_list_id
    user_arguments_masked = {
        k: (masked_value if k in secret_variables else v)
        for k, v in user_arguments.items()
    }
    argument_dict['user_arguments_full'] = json_prettyprint(user_arguments)
    argument_dict['user_arguments'] = json_prettyprint(user_arguments_masked)
    argument_dict['config_yaml_full'] = user_arguments_to_yaml(user_arguments)
    argument_dict['config_yaml'] = user_arguments_to_yaml(
        user_arguments_masked)

    # The expanded_config and expanded_config_full variables contain all other variables and their values.
    # expanded_config is a copy of expanded_config_full with secret values removed. Calculating these variables' values
    # must come after the calculation of all other variables to prevent infinite recursion.
    # TODO(cmaloney): Make this late-bound by gen.internals
    expanded_config_full = {
        k: v
        for k, v in argument_dict.items()
        # Omit late-bound variables whose values have not yet been calculated.
        if not v.startswith(gen.internals.LATE_BIND_PLACEHOLDER_START)
    }
    expanded_config_scrubbed = {
        k: v
        for k, v in expanded_config_full.items() if k not in secret_variables
    }
    argument_dict['expanded_config_full'] = format_expanded_config(
        expanded_config_full)
    argument_dict['expanded_config'] = format_expanded_config(
        expanded_config_scrubbed)

    # Initialize CA and add arguments (exhibitor_ca_certificate and exhibitor_ca_certificate_path)
    gen.exhibitor_tls_bootstrap.initialize_exhibitor_ca(argument_dict)

    log.debug("Final arguments:" + json_prettyprint({
        # Mask secret config values.
        k: (masked_value if k in secret_variables else v)
        for k, v in argument_dict.items()
    }))

    # Fill in the template parameters
    # TODO(cmaloney): render_templates should ideally take the template targets.
    rendered_templates = render_templates(templates, argument_dict)

    # Validate there aren't any unexpected top level directives in any of the files
    # (likely indicates a misspelling)
    for name, template in rendered_templates.items():
        if name == dcos_services_yaml:  # yaml list of the service files
            assert isinstance(template, list)
        elif name == cloud_config_yaml:
            assert template.keys() <= CLOUDCONFIG_KEYS, template.keys()
        elif isinstance(template, str):  # Not a yaml template
            pass
        else:  # yaml template file
            log.debug("validating template file %s", name)
            assert template.keys() <= PACKAGE_KEYS, template.keys()

    stable_artifacts = []
    channel_artifacts = []

    # Find all files which contain late bind variables and turn them into a "late bind package"
    # TODO(cmaloney): check there are no late bound variables in cloud-config.yaml
    late_files, regular_files = extract_files_containing_late_variables(
        rendered_templates[dcos_config_yaml]['package'])
    # put the regular files right back
    rendered_templates[dcos_config_yaml] = {'package': regular_files}

    # Render cluster package list artifact.
    cluster_package_list_filename = 'package_lists/{}.package_list.json'.format(
        cluster_package_list_id)
    os.makedirs(os.path.dirname(cluster_package_list_filename),
                mode=0o755,
                exist_ok=True)
    write_string(cluster_package_list_filename,
                 argument_dict['cluster_packages'])
    log.info('Cluster package list: {}'.format(cluster_package_list_filename))
    stable_artifacts.append(cluster_package_list_filename)

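    # Build the artifact path for a package, e.g. (illustratively)
    # 'packages/dcos-config/dcos-config--setup_<config_id>.tar.xz'.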
    def make_package_filename(package_id, extension):
        return 'packages/{0}/{1}{2}'.format(package_id.name, repr(package_id),
                                            extension)

    # Render all the cluster packages
    cluster_package_info = {}

    # Prepare late binding config, if any.
    late_package = build_late_package(late_files, config_id,
                                      argument_dict['provider'])
    if late_variables and late_package:
        # Render the late binding package. This package will be downloaded onto
        # each cluster node during bootstrap and rendered into the final config
        # using the values from the late config file.
        late_package_id = PackageId(late_package['name'])
        late_package_filename = make_package_filename(late_package_id,
                                                      '.dcos_config')
        os.makedirs(os.path.dirname(late_package_filename), mode=0o755)
        write_yaml(late_package_filename, {'package': late_package['package']},
                   default_flow_style=False)
        log.info('Package filename: {}'.format(late_package_filename))
        stable_artifacts.append(late_package_filename)

        # Add the late config file to cloud config. The expressions in
        # late_variables will be resolved by the service handling the cloud
        # config (e.g. Amazon CloudFormation). The rendered late config file
        # on a cluster node's filesystem will contain the final values.
        rendered_templates[cloud_config_yaml]['root'].append({
            'path': config_dir + '/setup-flags/late-config.yaml',
            'permissions': '0644',
            'owner': 'root',
            # TODO(cmaloney): don't prettyprint to save bytes.
            # NOTE: Use yaml here simply to avoid painful escaping and unescaping.
            'content': render_yaml({
                'late_bound_package_id': late_package['name'],
                'bound_values': late_variables
            })
        })

    # Collect metadata for cluster packages.
    for package_id_str in cluster_packages:
        package_id = PackageId(package_id_str)
        package_filename = make_package_filename(package_id, '.tar.xz')

        cluster_package_info[package_id.name] = {
            'id': package_id_str,
            'filename': package_filename
        }

    # Render config packages.
    for package_id_str in config_package_ids:
        package_id = PackageId(package_id_str)
        package_filename = cluster_package_info[package_id.name]['filename']
        do_gen_package(rendered_templates[package_id.name + '.yaml'],
                       cluster_package_info[package_id.name]['filename'])
        stable_artifacts.append(package_filename)

    # Convert cloud-config to just contain write_files rather than root
    cc = rendered_templates[cloud_config_yaml]

    # Shouldn't contain any packages. Providers should pull what they need to
    # late bind out of other packages via cc_package_file.
    assert 'package' not in cc
    cc_root = cc.pop('root', [])
    # Make sure write_files exists.
    assert 'write_files' not in cc
    cc['write_files'] = []
    # Do the transform
    for item in cc_root:
        assert is_absolute_path(item['path'])
        cc['write_files'].append(item)
    rendered_templates[cloud_config_yaml] = cc

    # Add utils that need to be defined here so they can be bound to locals.
    def add_services(cloudconfig, cloud_init_implementation):
        return add_units(cloudconfig, rendered_templates[dcos_services_yaml],
                         cloud_init_implementation)

    utils.add_services = add_services

    def add_stable_artifact(filename):
        assert filename not in stable_artifacts + channel_artifacts
        stable_artifacts.append(filename)

    utils.add_stable_artifact = add_stable_artifact

    def add_channel_artifact(filename):
        assert filename not in stable_artifacts + channel_artifacts
        channel_artifacts.append(filename)

    utils.add_channel_artifact = add_channel_artifact

    return Bunch({
        'arguments': argument_dict,
        'cluster_packages': cluster_package_info,
        'stable_artifacts': stable_artifacts,
        'channel_artifacts': channel_artifacts,
        'templates': rendered_templates,
        'utils': utils
    })
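
# Illustrative sketch of the secret-masking pattern generate() applies to
# user_arguments and to the final argument dump: any key known to be a secret
# has its value replaced with a fixed marker before it is logged or rendered.
# The argument and key names below are hypothetical examples, not the real
# DC/OS secret list.
def mask_secrets(arguments, secret_keys, masked_value='**HIDDEN**'):
    return {
        k: (masked_value if k in secret_keys else v)
        for k, v in arguments.items()
    }


# mask_secrets({'cluster_name': 'demo', 'superuser_password_hash': 'abc123'},
#              {'superuser_password_hash'})
# -> {'cluster_name': 'demo', 'superuser_password_hash': '**HIDDEN**'}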