Example #1
    def _get_package_list(treeinfo_dict, key, excludes=None):
        """Return a list of package name strings from treeinfo_dict by key.

        If key isn't present in treeinfo_dict, an empty list is returned.

        """
        excludes = excludes or list()
        package_list = treeinfo_dict.get(key, list())

        # Validate package list.
        if not isinstance(package_list, list):
            raise BuildError("{} must be either null (meaning don't use) or a list of package names.".format(key))
        for package_name in package_list:
            if not isinstance(package_name, str):
                raise BuildError("{} must be a list of strings. Found a {} with the value: {}".format(
                    key, type(package_name), package_name))

            try:
                PackageId.validate_name(package_name)
            except ValidationError as ex:
                raise BuildError("Invalid package name in {}: {}".format(key, package_name)) from ex

            if package_name in excludes:
                raise BuildError("Package found in both exclude and {}: {}".format(key, package_name))

        return package_list
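A minimal usage sketch for the helper above, assuming BuildError, PackageId, and ValidationError are imported as in the surrounding module; the treeinfo keys and package names are hypothetical:

    treeinfo = {'core_package_list': ['mesos', 'java']}
    _get_package_list(treeinfo, 'core_package_list')                      # ['mesos', 'java']
    _get_package_list(treeinfo, 'missing_key')                            # [] (key absent)
    _get_package_list(treeinfo, 'core_package_list', excludes=['java'])   # raises BuildError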
Example #2
File: __init__.py Project: alberts/dcos
    def _get_package_list(treeinfo_dict, key, excludes=None):
        """Return a list of package name strings from treeinfo_dict by key.

        If key isn't present in treeinfo_dict, an empty list is returned.

        """
        excludes = excludes or list()
        package_list = treeinfo_dict.get(key, list())

        # Validate package list.
        if not isinstance(package_list, list):
            raise BuildError("{} must be either null (meaning don't use) or a list of package names.".format(key))
        for package_name in package_list:
            if not isinstance(package_name, str):
                raise BuildError("{} must be a list of strings. Found a {} with the value: {}".format(
                    key, type(package_name), package_name))

            try:
                PackageId.validate_name(package_name)
            except ValidationError as ex:
                raise BuildError("Invalid package name in {}: {}".format(key, package_name)) from ex

            if package_name in excludes:
                raise BuildError("Package found in both exclude and {}: {}".format(key, package_name))

        return package_list
Example #3
File: actions.py Project: zhous1q/dcos
def swap_active_package(install, repository, package_id, systemd, block_systemd):
    """Replace an active package with a package_id with the same name.

    swap(install, repository, 'foo--version') will replace the active 'foo'
    package with 'foo--version'.

    install: pkgpanda.Install
    repository: pkgpanda.Repository
    package_id: package ID to activate
    systemd: start/stop systemd services
    block_systemd: if systemd, block waiting for systemd services to come up

    """
    active = install.get_active()
    # TODO(cmaloney): I guarantee there is a better way to write this and
    # I've written the same logic before...
    packages_by_name = dict()
    for id_str in active:
        pkg_id = PackageId(id_str)
        packages_by_name[pkg_id.name] = pkg_id

    new_id = PackageId(package_id)
    if new_id.name not in packages_by_name:
        raise ValidationError("No package with name {} currently active to swap with.".format(new_id.name))

    packages_by_name[new_id.name] = new_id
    new_active = list(map(str, packages_by_name.values()))
    # Activate with the new package name
    activate_packages(install, repository, new_active, systemd, block_systemd)
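A self-contained sketch of the name-keyed swap logic above, assuming pkgpanda is importable; the package ids are hypothetical:

    from pkgpanda import PackageId

    active = ['mesos--1.0', 'java--8']
    packages_by_name = {PackageId(s).name: PackageId(s) for s in active}
    packages_by_name['mesos'] = PackageId('mesos--2.0')      # swap in the new id by name
    new_active = list(map(str, packages_by_name.values()))   # ['mesos--2.0', 'java--8']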
Example #4
    def visit(pkg_tuple):
        # Visit the node for the first (and only) time. Finding a node again
        # means a cycle, which should be detected by the caller.

        assert isinstance(pkg_tuple, tuple)

        assert pkg_tuple not in visited
        visited.add(pkg_tuple)

        # Ensure all dependencies are built. Sorted for stability
        for require in sorted(package_store.packages[pkg_tuple]['requires']):
            require_tuple = expand_require(require)
            if require_tuple in built:
                continue
            if require_tuple in visited:
                raise BuildError("Circular dependency. Circular link {0} -> {1}".format(pkg_tuple, require_tuple))

            if PackageId.is_id(require_tuple[0]):
                raise BuildError("Depending on a specific package id is not supported. Package {} "
                                 "depends on {}".format(pkg_tuple, require_tuple))

            if require_tuple not in package_store.packages:
                raise BuildError("Package {0} require {1} not buildable from tree.".format(pkg_tuple, require_tuple))

            visit(require_tuple)

        build_order.append(pkg_tuple)
        built.add(pkg_tuple)
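The closure above reads package_store, visited, built, and build_order from its enclosing scope. A self-contained sketch of the same depth-first ordering, using a plain dict of requires in place of package_store (the package tuples are hypothetical):

    requires = {('a', None): [('b', None)], ('b', None): []}
    visited, built, build_order = set(), set(), []

    def visit_sketch(pkg_tuple):
        visited.add(pkg_tuple)
        for require_tuple in requires[pkg_tuple]:
            if require_tuple in built:
                continue
            if require_tuple in visited:
                raise Exception("Circular dependency")
            visit_sketch(require_tuple)
        build_order.append(pkg_tuple)
        built.add(pkg_tuple)

    visit_sketch(('a', None))
    print(build_order)  # [('b', None), ('a', None)] -- dependencies first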
Example #5
File: __init__.py Project: zhous1q/dcos
def validate_cluster_packages(cluster_packages):
    for pkg_id in cluster_packages:
        try:
            PackageId(pkg_id)
        except pkgpanda.exceptions.ValidationError as ex:
            raise Exception('Invalid cluster package ID: {}'.format(
                str(ex))) from ex
Example #6
File: calc.py Project: pologood/dcos
def validate_cluster_packages(cluster_packages):
    pkg_id_list = json.loads(cluster_packages)
    for pkg_id in pkg_id_list:
        try:
            PackageId(pkg_id)
        except pkgpanda.exceptions.ValidationError as ex:
            raise AssertionError(str(ex)) from ex
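A hedged usage sketch of this variant, which expects a JSON-encoded list; the ids shown are hypothetical:

    validate_cluster_packages('["mesos--1.0", "dcos-config--setup_abc"]')  # passes
    validate_cluster_packages('["not a valid id"]')                        # raises AssertionError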
Example #7
File: actions.py Project: zhous1q/dcos
def add_package_file(repository, package_filename):
    """Add a package to the repository from a file.

    repository: pkgpanda.Repository
    package_filename: location of the package file

    """
    filename_suffix = '.tar.xz'
    # Extract Package Id (Filename must be path/{pkg-id}.tar.xz).
    name = os.path.basename(package_filename)

    if not name.endswith(filename_suffix):
        raise ValidationError(
            "ERROR: Can only add package tarballs which have names like "
            "{{pkg-id}}{}".format(filename_suffix))

    pkg_id = name[:-len(filename_suffix)]

    # Validate the package id
    PackageId(pkg_id)

    def fetch(_, target):
        extract_tarball(package_filename, target)

    repository.add(fetch, pkg_id)
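A sketch of the filename convention enforced above, assuming os and PackageId are imported as in this module; the path is hypothetical:

    name = os.path.basename('/var/tmp/packages/mesos--1.0.tar.xz')
    pkg_id = name[:-len('.tar.xz')]   # 'mesos--1.0'
    PackageId(pkg_id)                 # validates the id before repository.add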
Example #8
    def visit(pkg_tuple):
        # Visit the node for the first (and only) time. Finding a node again
        # means a cycle, which should be detected by the caller.

        assert isinstance(pkg_tuple, tuple)

        assert pkg_tuple not in visited
        visited.add(pkg_tuple)

        # Ensure all dependencies are built. Sorted for stability
        for require in sorted(package_store.packages[pkg_tuple]['requires']):
            require_tuple = expand_require(require)
            if require_tuple in built:
                continue
            if require_tuple in visited:
                raise BuildError(
                    "Circular dependency. Circular link {0} -> {1}".format(
                        pkg_tuple, require_tuple))

            if PackageId.is_id(require_tuple[0]):
                raise BuildError(
                    "Depending on a specific package id is not supported. Package {} "
                    "depends on {}".format(pkg_tuple, require_tuple))

            if require_tuple not in package_store.packages:
                raise BuildError(
                    "Package {0} require {1} not buildable from tree.".format(
                        pkg_tuple, require_tuple))

            visit(require_tuple)

        build_order.append(pkg_tuple)
        built.add(pkg_tuple)
Example #9
File: actions.py Project: zhous1q/dcos
def remove_package(install, repository, package_id):
    """Remove a package from the local repository.

    Errors if package_id is activated in install.

    install: pkgpanda.Install
    repository: pkgpanda.Repository
    package_id: package ID to remove from repository

    """
    if package_id in install.get_active():
        raise PackageConflict("Refusing to remove active package {0}".format(package_id))

    sys.stdout.write("\rRemoving: {0}".format(package_id))
    sys.stdout.flush()
    try:
        # Validate the package id and that the package is installed.
        PackageId(package_id)
        repository.remove(package_id)
    except ValidationError as ex:
        raise ValidationError("Invalid package id {0}".format(package_id)) from ex
    except OSError as ex:
        raise Exception("Error removing package {0}: {1}".format(package_id, ex)) from ex
    else:
        sys.stdout.write("\rRemoved: {0}".format(package_id))
    finally:
        sys.stdout.write("\n")
        sys.stdout.flush()
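A minimal sketch of the validation step above, assuming (as the try/except implies) that a malformed id makes PackageId raise ValidationError; the id is hypothetical:

    try:
        PackageId('mesos')   # malformed: missing the '--' name/version separator
    except ValidationError as ex:
        print("rejected before repository.remove():", ex)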
Example #10
File: cli.py Project: yonglehou/dcos
def add_to_repository(repository, path):
    # Extract Package Id (Filename must be path/{pkg-id}.tar.xz).
    name = os.path.basename(path)

    if not name.endswith('.tar.xz'):
        print("ERROR: Can only add package tarballs which have names " +
              "like {pkg-id}.tar.xz")

    pkg_id = name[:-len('.tar.xz')]

    # Validate the package id
    PackageId(pkg_id)

    def fetch(_, target):
        extract_tarball(path, target)

    repository.add(fetch, pkg_id)
Example #11
    def visit(pkg_tuple):
        """Add a package and its requires to the build order.

        Raises AssertionError if pkg_tuple is in the set of visited packages.

        If the package has any requires, they're recursively visited and added
        to the build order depth-first. Then the package itself is added.

        """
        assert isinstance(pkg_tuple, tuple)

        # Visit the node for the first (and only) time.
        assert pkg_tuple not in visited
        visited.add(pkg_tuple)

        # Ensure all dependencies are built. Sorted for stability
        for require in sorted(package_store.packages[pkg_tuple]['requires']):
            require_tuple = expand_require(require)

            # If the dependency has already been built, we can move on.
            if require_tuple in built:
                continue
            # If the dependency has not been built but has been visited, then
            # there's a cycle in the dependency graph.
            if require_tuple in visited:
                raise BuildError(
                    "Circular dependency. Circular link {0} -> {1}".format(
                        pkg_tuple, require_tuple))

            if PackageId.is_id(require_tuple[0]):
                raise BuildError(
                    "Depending on a specific package id is not supported. Package {} "
                    "depends on {}".format(pkg_tuple, require_tuple))

            if require_tuple not in package_store.packages:
                raise BuildError(
                    "Package {0} require {1} not buildable from tree.".format(
                        pkg_tuple, require_tuple))

            # Add the dependency (after its dependencies, if any) to the build
            # order.
            visit(require_tuple)

        build_order.append(pkg_tuple)
        built.add(pkg_tuple)
Example #12
File: __init__.py Project: alberts/dcos
    def visit(pkg_tuple):
        """Add a package and its requires to the build order.

        Raises AssertionError if pkg_tuple is in the set of visited packages.

        If the package has any requires, they're recursively visited and added
        to the build order depth-first. Then the package itself is added.

        """
        assert isinstance(pkg_tuple, tuple)

        # Visit the node for the first (and only) time.
        assert pkg_tuple not in visited
        visited.add(pkg_tuple)

        # Ensure all dependencies are built. Sorted for stability
        for require in sorted(package_store.packages[pkg_tuple]['requires']):
            require_tuple = expand_require(require)

            # If the dependency has already been built, we can move on.
            if require_tuple in built:
                continue
            # If the dependency has not been built but has been visited, then
            # there's a cycle in the dependency graph.
            if require_tuple in visited:
                raise BuildError("Circular dependency. Circular link {0} -> {1}".format(pkg_tuple, require_tuple))

            if PackageId.is_id(require_tuple[0]):
                raise BuildError("Depending on a specific package id is not supported. Package {} "
                                 "depends on {}".format(pkg_tuple, require_tuple))

            if require_tuple not in package_store.packages:
                raise BuildError("Package {0} require {1} not buildable from tree.".format(pkg_tuple, require_tuple))

            # Add the dependency (after its dependencies, if any) to the build
            # order.
            visit(require_tuple)

        build_order.append(pkg_tuple)
        built.add(pkg_tuple)
Example #13
File: __init__.py Project: Jordan50/dcos
def build(variant, package_dir, name, repository_url, clean_after_build):
    print("Building package {} variant {}".format(name, variant or "<default>"))
    tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
    repository = Repository(tmpdir.name)

    def pkg_abs(name):
        return package_dir + '/' + name

    # Build pkginfo over time, translating fields from buildinfo.
    pkginfo = {}

    # Build up the docker command arguments over time, translating fields as needed.
    cmd = DockerCmd()

    buildinfo = load_buildinfo(package_dir, variant)

    if 'name' in buildinfo:
        raise BuildError("'name' is not allowed in buildinfo.json, it is implicitly the name of the "
                         "folder containing the buildinfo.json")

    # Make sure build_script is only set on variants
    if 'build_script' in buildinfo and variant is None:
        raise BuildError("build_script can only be set on package variants")

    # Convert single_source -> sources
    try:
        sources = expand_single_source_alias(name, buildinfo)
    except ValidationError as ex:
        raise BuildError("Invalid buildinfo.json for package: {}".format(ex)) from ex

    # Save the final sources back into buildinfo so it gets written into
    # buildinfo.json. This also means buildinfo.json is always in expanded form.
    buildinfo['sources'] = sources

    # Construct the source fetchers, gather the checkout ids from them
    checkout_ids = dict()
    fetchers = dict()
    try:
        for src_name, src_info in sorted(sources.items()):
            if src_info['kind'] not in pkgpanda.build.src_fetchers.all_fetchers:
                raise ValidationError("No known way to catch src with kind '{}'. Known kinds: {}".format(
                    src_info['kind'],
                    pkgpanda.src_fetchers.all_fetchers.keys()))

            cache_dir = pkg_abs("cache")
            if not os.path.exists(cache_dir):
                os.mkdir(cache_dir)

            fetchers[src_name] = pkgpanda.build.src_fetchers.all_fetchers[src_info['kind']](src_name,
                                                                                            src_info,
                                                                                            package_dir)
            checkout_ids[src_name] = fetchers[src_name].get_id()
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    for src_name, checkout_id in checkout_ids.items():
        # NOTE: single_source buildinfo was expanded above so the src_name is
        # always correct here.
        # Make sure we never accidentally overwrite something which might be
        # important. Fields should match if specified (and that should be
        # tested at some point). For now, disallowing duplicate keys avoids the hassle.
        assert_no_duplicate_keys(checkout_id, buildinfo['sources'][src_name])
        buildinfo['sources'][src_name].update(checkout_id)

    # Add the sha1sum of the buildinfo.json + build file to the build ids
    build_ids = {"sources": checkout_ids}
    build_ids['build'] = pkgpanda.util.sha1(pkg_abs("build"))
    build_ids['pkgpanda_version'] = pkgpanda.build.constants.version
    build_ids['variant'] = '' if variant is None else variant

    extra_dir = pkg_abs("extra")
    # Add the "extra" folder inside the package as an additional source if it
    # exists
    if os.path.exists(extra_dir):
        extra_id = hash_folder(extra_dir)
        build_ids['extra_source'] = extra_id
        buildinfo['extra_source'] = extra_id

    # Figure out the docker name.
    docker_name = buildinfo.get('docker', 'dcos-builder:latest')
    cmd.container = docker_name

    # Add the id of the docker build environment to the build_ids.
    try:
        docker_id = get_docker_id(docker_name)
    except CalledProcessError:
        # docker pull the container and try again
        check_call(['docker', 'pull', docker_name])
        docker_id = get_docker_id(docker_name)

    build_ids['docker'] = docker_id

    # TODO(cmaloney): The environment variables should be generated during build
    # not live in buildinfo.json.
    build_ids['environment'] = buildinfo.get('environment', {})

    # Packages need directories inside the fake install root (otherwise docker
    # will try making the directories on a readonly filesystem), so build the
    # install root now, and make the package directories in it as we go.
    install_dir = tempfile.mkdtemp(prefix="pkgpanda-")

    active_packages = list()
    active_package_ids = set()
    active_package_variants = dict()
    auto_deps = set()
    # Verify all requires are in the repository.
    if 'requires' in buildinfo:
        # Final package has the same requires as the build.
        pkginfo['requires'] = buildinfo['requires']

        # TODO(cmaloney): Pull generating the full set of requires out into a function.
        to_check = copy.deepcopy(buildinfo['requires'])
        if type(to_check) != list:
            raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
        while to_check:
            requires_info = to_check.pop(0)
            requires_name, requires_variant = expand_require(requires_info)

            if requires_name in active_package_variants:
                # TODO(cmaloney): If one package depends on the <default>
                # variant of a package and 1+ others depends on a non-<default>
                # variant then update the dependency to the non-default variant
                # rather than erroring.
                if requires_variant != active_package_variants[requires_name]:
                    # TODO(cmaloney): Make this contain the chains of
                    # dependencies which contain the conflicting packages.
                    # a -> b -> c -> d {foo}
                    # e {bar} -> d {baz}
                    raise BuildError("Dependncy on multiple variants of the same package {}. "
                                     "variants: {} {}".format(
                                        requires_name,
                                        requires_variant,
                                        active_package_variants[requires_name]))

                # The package {requires_name, variant} is already a
                # dependency; don't process it again, move on to the next.
                continue

            active_package_variants[requires_name] = requires_variant

            # Figure out the last build of the dependency, add that as the
            # fully expanded dependency.
            require_package_dir = os.path.normpath(pkg_abs('../' + requires_name))
            last_build = require_package_dir + '/' + last_build_filename(requires_variant)
            if not os.path.exists(last_build):
                raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
                                 "the dependency".format(requires_name, requires_variant))

            try:
                pkg_id_str = load_string(last_build)
                auto_deps.add(pkg_id_str)
                pkg_buildinfo = load_buildinfo(require_package_dir, requires_variant)
                pkg_requires = pkg_buildinfo.get('requires', list())
                pkg_path = repository.package_path(pkg_id_str)
                pkg_tar = pkg_id_str + '.tar.xz'
                if not os.path.exists(require_package_dir + '/' + pkg_tar):
                    raise BuildError("The build tarball {} refered to by the last_build file of the "
                                     "dependency {} variant {} doesn't exist. Rebuild the dependency.".format(
                                        pkg_tar,
                                        requires_name,
                                        requires_variant))

                active_package_ids.add(pkg_id_str)

                # Mount the package into the docker container.
                cmd.volumes[pkg_path] = "/opt/mesosphere/packages/{}:ro".format(pkg_id_str)
                os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))

                # Add the dependencies of the package to the set which will be
                # activated.
                # TODO(cmaloney): All these 'transitive' dependencies shouldn't
                # be available to the package being built, only what depends on
                # them directly.
                to_check += pkg_requires
            except ValidationError as ex:
                raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
            except PackageError as ex:
                raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex

    # Add requires to the package id, calculate the final package id.
    # NOTE: active_packages isn't fully constructed here since we lazily load
    # packages not already in the repository.
    build_ids['requires'] = list(active_package_ids)
    version_base = hash_checkout(build_ids)
    version = None
    if "version_extra" in buildinfo:
        version = "{0}-{1}".format(buildinfo["version_extra"], version_base)
    else:
        version = version_base
    pkg_id = PackageId.from_parts(name, version)

    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
    buildinfo['build_ids'] = build_ids
    buildinfo['package_version'] = version

    # Save the package name and variant. The variant is used when installing
    # packages to validate dependencies.
    buildinfo['name'] = name
    buildinfo['variant'] = variant

    # If the package is already built, don't do anything.
    pkg_path = pkg_abs("{}.tar.xz".format(pkg_id))

    # Done if it exists locally
    if exists(pkg_path):
        print("Package up to date. Not re-building.")

        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        check_call(["mkdir", "-p", pkg_abs("cache")])
        write_string(pkg_abs(last_build_filename(variant)), str(pkg_id))

        return pkg_path

    # Try downloading.
    if repository_url:
        tmp_filename = pkg_path + '.tmp'
        try:
            # Normalize to no trailing slash for repository_url
            repository_url = repository_url.rstrip('/')
            url = repository_url + '/packages/{0}/{1}.tar.xz'.format(pkg_id.name, str(pkg_id))
            print("Attempting to download", pkg_id, "from", url)
            download(tmp_filename, url, package_dir)
            os.rename(tmp_filename, pkg_path)

            print("Package up to date. Not re-building. Downloaded from repository-url.")
            # TODO(cmaloney): Updating / filling last_build should be moved out of
            # the build function.
            check_call(["mkdir", "-p", pkg_abs("cache")])
            write_string(pkg_abs(last_build_filename(variant)), str(pkg_id))
            return pkg_path
        except FetchError:
            try:
                os.remove(tmp_filename)
            except:
                pass

            # Fall out and do the build since the command errored.
            print("Unable to download from cache. Proceeding to build")

    print("Building package {} with buildinfo: {}".format(
        pkg_id,
        json.dumps(buildinfo, indent=2, sort_keys=True)))

    # Clean out src, result so later steps can use them freely for building.
    clean(package_dir)

    # Only fresh builds are allowed which don't overlap existing artifacts.
    result_dir = pkg_abs("result")
    if exists(result_dir):
        raise BuildError("result folder must not exist. It will be made when the package is "
                         "built. {}".format(result_dir))

    # 'mkpanda add' all implicit dependencies since we actually need to build.
    for dep in auto_deps:
        print("Auto-adding dependency: {}".format(dep))
        # NOTE: Not using the name pkg_id because that overrides the outer one.
        id_obj = PackageId(dep)
        add_to_repository(repository, pkg_abs('../{0}/{1}.tar.xz'.format(id_obj.name, dep)))
        package = repository.load(dep)
        active_packages.append(package)

    # Check out all the sources into their respective 'src/' folders.
    try:
        src_dir = pkg_abs('src')
        if os.path.exists(src_dir):
            raise ValidationError(
                "'src' directory already exists, did you have a previous build? " +
                "Currently all builds must be from scratch. Support should be " +
                "added for re-using a src directory when possible. src={}".format(src_dir))
        os.mkdir(src_dir)
        for src_name, fetcher in sorted(fetchers.items()):
            root = pkg_abs('src/' + src_name)
            os.mkdir(root)

            fetcher.checkout_to(root)
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    # Copy over environment settings
    if 'environment' in buildinfo:
        pkginfo['environment'] = buildinfo['environment']

    # Activate the packages so that we have a proper path, environment
    # variables.
    # TODO(cmaloney): RAII type thing for temporary directory so if we
    # don't get all the way through things will be cleaned up?
    install = Install(install_dir, None, True, False, True, True)
    install.activate(active_packages)
    # Rewrite all the symlinks inside the active path because we will
    # be mounting the folder into a docker container, and the absolute
    # paths to the packages will change.
    # TODO(cmaloney): This isn't very clean, it would be much nicer to
    # just run pkgpanda inside the package.
    rewrite_symlinks(install_dir, repository.path, "/opt/mesosphere/packages/")

    print("Building package in docker")

    # TODO(cmaloney): Run as a specific non-root user, make it possible
    # for non-root to cleanup afterwards.
    # Run the build, prepping the environment as necessary.
    mkdir(pkg_abs("result"))

    # Copy the build info to the resulting tarball
    write_json(pkg_abs("src/buildinfo.full.json"), buildinfo)
    write_json(pkg_abs("result/buildinfo.full.json"), buildinfo)

    write_json(pkg_abs("result/pkginfo.json"), pkginfo)

    # Make the folder for the package we are building. If docker does it, it
    # gets auto-created with root permissions and we can't actually delete it.
    os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))

    # TODO(cmaloney): Disallow writing to well known files and directories?
    # Source we checked out
    cmd.volumes.update({
        # TODO(cmaloney): src should be read only...
        pkg_abs("src"): "/pkg/src:rw",
        # The build script
        pkg_abs(buildinfo.get('build_script', 'build')): "/pkg/build:ro",
        # Getting the result out
        pkg_abs("result"): "/opt/mesosphere/packages/{}:rw".format(pkg_id),
        install_dir: "/opt/mesosphere:ro"
    })

    if os.path.exists(extra_dir):
        cmd.volumes[extra_dir] = "/pkg/extra:ro"

    cmd.environment = {
        "PKG_VERSION": version,
        "PKG_NAME": name,
        "PKG_ID": pkg_id,
        "PKG_PATH": "/opt/mesosphere/packages/{}".format(pkg_id),
        "PKG_VARIANT": variant if variant is not None else "<default>"
    }

    try:
        # TODO(cmaloney): Run a wrapper which sources
        # /opt/mesosphere/environment then runs a build. Also should fix
        # ownership of /opt/mesosphere/packages/{pkg_id} post build.
        cmd.run([
            "/bin/bash",
            "-o", "nounset",
            "-o", "pipefail",
            "-o", "errexit",
            "/pkg/build"])
    except CalledProcessError as ex:
        raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))

    # Clean up the temporary install dir used for dependencies.
    # TODO(cmaloney): Move to an RAII wrapper.
    check_call(['rm', '-rf', install_dir])

    print("Building package tarball")

    # Check for forbidden services before packaging the tarball:
    try:
        check_forbidden_services(pkg_abs("result"), RESERVED_UNIT_NAMES)
    except ValidationError as ex:
        raise BuildError("Package validation failed: {}".format(ex))

    # TODO(cmaloney): Updating / filling last_build should be moved out of
    # the build function.
    check_call(["mkdir", "-p", pkg_abs("cache")])
    write_string(pkg_abs(last_build_filename(variant)), str(pkg_id))

    # Bundle the artifacts into the pkgpanda package
    tmp_name = pkg_path + "-tmp.tar.xz"
    make_tar(tmp_name, pkg_abs("result"))
    os.rename(tmp_name, pkg_path)
    print("Package built.")
    if clean_after_build:
        clean(package_dir)
    return pkg_path
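A sketch of how the final package id is assembled above: version_extra (when present) is prefixed to the hash of the build ids, and the result is combined with the package name via PackageId.from_parts. The values shown are illustrative, not real hashes:

    version_base = 'abc123'                          # stands in for hash_checkout(build_ids)
    version = "{0}-{1}".format("2.x", version_base)  # when buildinfo has version_extra == "2.x"
    pkg_id = PackageId.from_parts('mesos', version)
    str(pkg_id)                                      # 'mesos--2.x-abc123'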
Example #14
File: actions.py Project: zhous1q/dcos
def _do_bootstrap(install, repository):
    # These files should be set by the environment which initially builds
    # the host (cloud-init).
    repository_url = if_exists(load_string, install.get_config_filename("setup-flags/repository-url"))

    def fetcher(id, target):
        if repository_url is None:
            raise ValidationError("ERROR: Non-local package {} but no repository url given.".format(id))
        return requests_fetcher(repository_url, id, target, os.getcwd())

    setup_pkg_dir = install.get_config_filename("setup-packages")
    if os.path.exists(setup_pkg_dir):
        raise ValidationError(
            "setup-packages is no longer supported. It's functionality has been replaced with late "
            "binding packages. Found setup packages dir: {}".format(setup_pkg_dir))

    setup_packages_to_activate = []

    # If the host has late config values, build the late config package from them.
    late_config = if_exists(load_yaml, install.get_config_filename("setup-flags/late-config.yaml"))
    if late_config:
        pkg_id_str = late_config['late_bound_package_id']
        late_values = late_config['bound_values']
        print("Binding late config to late package {}".format(pkg_id_str))
        print("Bound values: {}".format(late_values))

        if not PackageId.is_id(pkg_id_str):
            raise ValidationError("Invalid late package id: {}".format(pkg_id_str))
        pkg_id = PackageId(pkg_id_str)
        if pkg_id.version != "setup":
            raise ValidationError("Late package must have the version setup. Bad package: {}".format(pkg_id_str))

        # Collect the late config package.
        with tempfile.NamedTemporaryFile() as f:
            download(
                f.name,
                repository_url + '/packages/{0}/{1}.dcos_config'.format(pkg_id.name, pkg_id_str),
                os.getcwd(),
                rm_on_error=False,
            )
            late_package = load_yaml(f.name)

        # Resolve the late package using the bound late config values.
        final_late_package = resolve_late_package(late_package, late_values)

        # Render the package onto the filesystem and add it to the package
        # repository.
        with tempfile.NamedTemporaryFile() as f:
            do_gen_package(final_late_package, f.name)
            repository.add(lambda _, target: extract_tarball(f.name, target), pkg_id_str)
        setup_packages_to_activate.append(pkg_id_str)

    # If active.json is set on the host, use that as the set of packages to
    # activate. Otherwise just use the set of currently active packages (those
    # active in the bootstrap tarball)
    to_activate = None
    active_path = install.get_config_filename("setup-flags/active.json")
    if os.path.exists(active_path):
        print("Loaded active packages from", active_path)
        to_activate = load_json(active_path)

        # Ensure all packages are local
        print("Ensuring all packages in active set {} are local".format(",".join(to_activate)))
        for package in to_activate:
            repository.add(fetcher, package)
    else:
        print("Calculated active packages from bootstrap tarball")
        to_activate = list(install.get_active())

        package_list_filename = install.get_config_filename("setup-flags/cluster-package-list")
        print("Checking for cluster packages in:", package_list_filename)
        package_list_id = if_exists(load_string, package_list_filename)
        if package_list_id:
            print("Cluster package list:", package_list_id)
            cluster_packages = _get_package_list(package_list_id, repository_url)
            print("Loading cluster-packages: {}".format(cluster_packages))

            for package_id_str in cluster_packages:
                # Validate the package ids
                pkg_id = PackageId(package_id_str)

                # Fetch the packages if not local
                if not repository.has_package(package_id_str):
                    repository.add(fetcher, package_id_str)

                # Add the package to the set to activate
                setup_packages_to_activate.append(package_id_str)
        else:
            print("No cluster-packages specified")

    # Calculate the full set of final packages (Explicit activations + setup packages).
    # De-duplicate using a set.
    to_activate = list(set(to_activate + setup_packages_to_activate))

    print("Activating packages")
    install.activate(repository.load_packages(to_activate))
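A sketch of the late-package id check above: the id must parse as a PackageId whose version is exactly 'setup' (the id below is hypothetical):

    pkg_id = PackageId('dcos-provider-aws--setup')
    pkg_id.name      # 'dcos-provider-aws'
    pkg_id.version   # 'setup' -- anything else is rejected above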
Example #15
File: actions.py Project: zouyee/dcos
def _do_bootstrap(install, repository):
    # These files should be set by the environment which initially builds
    # the host (cloud-init).
    repository_url = if_exists(
        load_string, install.get_config_filename("setup-flags/repository-url"))

    # TODO(cmaloney): If there is 1+ master, grab the active config from a master.
    # If the config can't be grabbed from any of them, fail.
    def fetcher(id, target):
        if repository_url is None:
            raise ValidationError(
                "ERROR: Non-local package {} but no repository url given.".
                format(id))
        return requests_fetcher(repository_url, id, target, os.getcwd())

    # Copy host/cluster-specific packages written to the filesystem manually
    # from the setup-packages folder into the repository. Do not overwrite or
    # merge existing packages, hard fail instead.
    setup_packages_to_activate = []
    setup_pkg_dir = install.get_config_filename("setup-packages")
    copy_fetcher = partial(_copy_fetcher, setup_pkg_dir)
    if os.path.exists(setup_pkg_dir):
        for pkg_id_str in os.listdir(setup_pkg_dir):
            print("Installing setup package: {}".format(pkg_id_str))
            if not PackageId.is_id(pkg_id_str):
                raise ValidationError(
                    "Invalid package id in setup package: {}".format(
                        pkg_id_str))
            pkg_id = PackageId(pkg_id_str)
            if pkg_id.version != "setup":
                raise ValidationError(
                    "Setup packages (those in `{0}`) must have the version setup. "
                    "Bad package: {1}".format(setup_pkg_dir, pkg_id_str))

            # Make sure there is no existing package
            if repository.has_package(pkg_id_str):
                print("WARNING: Ignoring already installed package {}".format(
                    pkg_id_str))

            repository.add(copy_fetcher, pkg_id_str)
            setup_packages_to_activate.append(pkg_id_str)

    # If active.json is set on the host, use that as the set of packages to
    # activate. Otherwise just use the set of currently active packages (those
    # active in the bootstrap tarball)
    to_activate = None
    active_path = install.get_config_filename("setup-flags/active.json")
    if os.path.exists(active_path):
        print("Loaded active packages from", active_path)
        to_activate = load_json(active_path)

        # Ensure all packages are local
        print("Ensuring all packages in active set {} are local".format(
            ",".join(to_activate)))
        for package in to_activate:
            repository.add(fetcher, package)
    else:
        print("Calculated active packages from bootstrap tarball")
        to_activate = list(install.get_active())

        # Fetch and activate all requested additional packages to accompany the bootstrap packages.
        cluster_packages_filename = install.get_config_filename(
            "setup-flags/cluster-packages.json")
        cluster_packages = if_exists(load_json, cluster_packages_filename)
        print("Checking for cluster packages in:", cluster_packages_filename)
        if cluster_packages:
            if not isinstance(cluster_packages, list):
                print(
                    'ERROR: {} should contain a JSON list of packages. Got a {}'
                    .format(cluster_packages_filename, type(cluster_packages)))
            print("Loading cluster-packages: {}".format(cluster_packages))

            for package_id_str in cluster_packages:
                # Validate the package ids
                pkg_id = PackageId(package_id_str)

                # Fetch the packages if not local
                if not repository.has_package(package_id_str):
                    repository.add(fetcher, package_id_str)

                # Add the package to the set to activate
                setup_packages_to_activate.append(package_id_str)
        else:
            print("No cluster-packages specified")

    # Calculate the full set of final packages (Explicit activations + setup packages).
    # De-duplicate using a set.
    to_activate = list(set(to_activate + setup_packages_to_activate))

    print("Activating packages")
    install.activate(repository.load_packages(to_activate))
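PackageId.is_id, used above to pre-screen directory names, checks the name--version form without raising. A hedged sketch with hypothetical inputs:

    PackageId.is_id('mesos--1.0')   # True
    PackageId.is_id('mesos')        # False -- not a full package id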
Example #16
File: __init__.py Project: alberts/dcos
def build(package_store, name, variant, clean_after_build, recursive=False):
    assert isinstance(package_store, PackageStore)
    print("Building package {} variant {}".format(name, pkgpanda.util.variant_str(variant)))
    tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
    repository = Repository(tmpdir.name)

    package_dir = package_store.get_package_folder(name)

    def src_abs(name):
        return package_dir + '/' + name

    def cache_abs(filename):
        return package_store.get_package_cache_folder(name) + '/' + filename

    # Build pkginfo over time, translating fields from buildinfo.
    pkginfo = {}

    # Build up the docker command arguments over time, translating fields as needed.
    cmd = DockerCmd()

    assert (name, variant) in package_store.packages, \
        "Programming error: name, variant should have been validated to be valid before calling build()."

    builder = IdBuilder(package_store.get_buildinfo(name, variant))
    final_buildinfo = dict()

    builder.add('name', name)
    builder.add('variant', pkgpanda.util.variant_str(variant))

    # Convert single_source -> sources
    if builder.has('sources'):
        if builder.has('single_source'):
            raise BuildError('Both sources and single_source cannot be specified at the same time')
        sources = builder.take('sources')
    elif builder.has('single_source'):
        sources = {name: builder.take('single_source')}
        builder.replace('single_source', 'sources', sources)
    else:
        builder.add('sources', {})
        sources = dict()
        print("NOTICE: No sources specified")

    final_buildinfo['sources'] = sources

    # Construct the source fetchers, gather the checkout ids from them
    checkout_ids = dict()
    fetchers = dict()
    try:
        for src_name, src_info in sorted(sources.items()):
            # TODO(cmaloney): Switch to a unified top level cache directory shared by all packages
            cache_dir = package_store.get_package_cache_folder(name) + '/' + src_name
            check_call(['mkdir', '-p', cache_dir])
            fetcher = get_src_fetcher(src_info, cache_dir, package_dir)
            fetchers[src_name] = fetcher
            checkout_ids[src_name] = fetcher.get_id()
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    for src_name, checkout_id in checkout_ids.items():
        # NOTE: single_source buildinfo was expanded above so the src_name is
        # always correct here.
        # Make sure we never accidentally overwrite something which might be
        # important. Fields should match if specified (and that should be
        # tested at some point). For now, disallowing duplicate keys avoids the hassle.
        assert_no_duplicate_keys(checkout_id, final_buildinfo['sources'][src_name])
        final_buildinfo['sources'][src_name].update(checkout_id)

    # Add the sha1 of the buildinfo.json + build file to the build ids
    builder.update('sources', checkout_ids)
    build_script = src_abs(builder.take('build_script'))
    # TODO(cmaloney): Change dest name to build_script_sha1
    builder.replace('build_script', 'build', pkgpanda.util.sha1(build_script))
    builder.add('pkgpanda_version', pkgpanda.build.constants.version)

    extra_dir = src_abs("extra")
    # Add the "extra" folder inside the package as an additional source if it
    # exists
    if os.path.exists(extra_dir):
        extra_id = hash_folder(extra_dir)
        builder.add('extra_source', extra_id)
        final_buildinfo['extra_source'] = extra_id

    # Figure out the docker name.
    docker_name = builder.take('docker')
    cmd.container = docker_name

    # Add the id of the docker build environment to the build_ids.
    try:
        docker_id = get_docker_id(docker_name)
    except CalledProcessError:
        # docker pull the container and try again
        check_call(['docker', 'pull', docker_name])
        docker_id = get_docker_id(docker_name)

    builder.update('docker', docker_id)

    # TODO(cmaloney): The environment variables should be generated during build
    # not live in buildinfo.json.
    pkginfo['environment'] = builder.take('environment')

    # Whether pkgpanda should make sure a `/var/lib` state directory is available on the host
    pkginfo['state_directory'] = builder.take('state_directory')
    if pkginfo['state_directory'] not in [True, False]:
        raise BuildError("state_directory in buildinfo.json must be a boolean `true` or `false`")

    username = None
    if builder.has('username'):
        username = builder.take('username')
        if not isinstance(username, str):
            raise BuildError("username in buildinfo.json must be either not set (no user for this"
                             " package), or a user name string")
        try:
            pkgpanda.UserManagement.validate_username(username)
        except ValidationError as ex:
            raise BuildError("username in buildinfo.json didn't meet the validation rules. {}".format(ex))
        pkginfo['username'] = username

    group = None
    if builder.has('group'):
        group = builder.take('group')
        if not isinstance(group, str):
            raise BuildError("group in buildinfo.json must be either not set (use default group for this user)"
                             ", or group must be a string")
        try:
            pkgpanda.UserManagement.validate_group_name(group)
        except ValidationError as ex:
            raise BuildError("group in buildinfo.json didn't meet the validation rules. {}".format(ex))
        pkginfo['group'] = group

    # Packages need directories inside the fake install root (otherwise docker
    # will try making the directories on a readonly filesystem), so build the
    # install root now, and make the package directories in it as we go.
    install_dir = tempfile.mkdtemp(prefix="pkgpanda-")

    active_packages = list()
    active_package_ids = set()
    active_package_variants = dict()
    auto_deps = set()

    # Final package has the same requires as the build.
    requires = builder.take('requires')
    pkginfo['requires'] = requires

    if builder.has("sysctl"):
        pkginfo["sysctl"] = builder.take("sysctl")

    # TODO(cmaloney): Pull generating the full set of requires out into a function.
    to_check = copy.deepcopy(requires)
    if type(to_check) != list:
        raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
    while to_check:
        requires_info = to_check.pop(0)
        requires_name, requires_variant = expand_require(requires_info)

        if requires_name in active_package_variants:
            # TODO(cmaloney): If one package depends on the <default>
            # variant of a package and 1+ others depends on a non-<default>
            # variant then update the dependency to the non-default variant
            # rather than erroring.
            if requires_variant != active_package_variants[requires_name]:
                # TODO(cmaloney): Make this contain the chains of
                # dependencies which contain the conflicting packages.
                # a -> b -> c -> d {foo}
                # e {bar} -> d {baz}
                raise BuildError(
                    "Dependncy on multiple variants of the same package {}. variants: {} {}".format(
                        requires_name,
                        requires_variant,
                        active_package_variants[requires_name]))

            # The package {requires_name, variant} is already a
            # dependency; don't process it again, move on to the next.
            continue

        active_package_variants[requires_name] = requires_variant

        # Figure out the last build of the dependency, add that as the
        # fully expanded dependency.
        requires_last_build = package_store.get_last_build_filename(requires_name, requires_variant)
        if not os.path.exists(requires_last_build):
            if recursive:
                # Build the dependency
                build(package_store, requires_name, requires_variant, clean_after_build, recursive)
            else:
                raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
                                 "the dependency".format(requires_name, requires_variant))

        try:
            pkg_id_str = load_string(requires_last_build)
            auto_deps.add(pkg_id_str)
            pkg_buildinfo = package_store.get_buildinfo(requires_name, requires_variant)
            pkg_requires = pkg_buildinfo['requires']
            pkg_path = repository.package_path(pkg_id_str)
            pkg_tar = pkg_id_str + '.tar.xz'
            if not os.path.exists(package_store.get_package_cache_folder(requires_name) + '/' + pkg_tar):
                raise BuildError(
                    "The build tarball {} refered to by the last_build file of the dependency {} "
                    "variant {} doesn't exist. Rebuild the dependency.".format(
                        pkg_tar,
                        requires_name,
                        requires_variant))

            active_package_ids.add(pkg_id_str)

            # Mount the package into the docker container.
            cmd.volumes[pkg_path] = "/opt/mesosphere/packages/{}:ro".format(pkg_id_str)
            os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))

            # Add the dependencies of the package to the set which will be
            # activated.
            # TODO(cmaloney): All these 'transitive' dependencies shouldn't
            # be available to the package being built, only what depends on
            # them directly.
            to_check += pkg_requires
        except ValidationError as ex:
            raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
        except PackageError as ex:
            raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex

    # Add requires to the package id, calculate the final package id.
    # NOTE: active_packages isn't fully constructed here since we lazily load
    # packages not already in the repository.
    builder.update('requires', list(active_package_ids))
    version_extra = None
    if builder.has('version_extra'):
        version_extra = builder.take('version_extra')

    build_ids = builder.get_build_ids()
    version_base = hash_checkout(build_ids)
    version = None
    if builder.has('version_extra'):
        version = "{0}-{1}".format(version_extra, version_base)
    else:
        version = version_base
    pkg_id = PackageId.from_parts(name, version)

    # Everything must have been extracted by now. If it wasn't, that is a hard
    # error: a field was set but never used, and it was not included in the
    # calculation of the PackageId.
    builder = None

    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
    final_buildinfo['build_ids'] = build_ids
    final_buildinfo['package_version'] = version

    # Save the package name and variant. The variant is used when installing
    # packages to validate dependencies.
    final_buildinfo['name'] = name
    final_buildinfo['variant'] = variant

    # If the package is already built, don't do anything.
    pkg_path = package_store.get_package_cache_folder(name) + '/{}.tar.xz'.format(pkg_id)

    # Done if it exists locally
    if exists(pkg_path):
        print("Package up to date. Not re-building.")

        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))

        return pkg_path

    # Try downloading.
    dl_path = package_store.try_fetch_by_id(pkg_id)
    if dl_path:
        print("Package up to date. Not re-building. Downloaded from repository-url.")
        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
        print(dl_path, pkg_path)
        assert dl_path == pkg_path
        return pkg_path

    # Fall out and do the build since it couldn't be downloaded
    print("Unable to download from cache. Proceeding to build")

    print("Building package {} with buildinfo: {}".format(
        pkg_id,
        json.dumps(final_buildinfo, indent=2, sort_keys=True)))

    # Clean out src, result so later steps can use them freely for building.
    def clean():
        # Run a docker container to remove src/ and result/
        cmd = DockerCmd()
        cmd.volumes = {
            package_store.get_package_cache_folder(name): "/pkg/:rw",
        }
        cmd.container = "ubuntu:14.04.4"
        cmd.run("package-cleaner", ["rm", "-rf", "/pkg/src", "/pkg/result"])

    clean()

    # Only fresh builds are allowed which don't overlap existing artifacts.
    result_dir = cache_abs("result")
    if exists(result_dir):
        raise BuildError("result folder must not exist. It will be made when the package is "
                         "built. {}".format(result_dir))

    # 'mkpanda add' all implicit dependencies since we actually need to build.
    for dep in auto_deps:
        print("Auto-adding dependency: {}".format(dep))
        # NOTE: Not using the name pkg_id because that overrides the outer one.
        id_obj = PackageId(dep)
        add_package_file(repository, package_store.get_package_path(id_obj))
        package = repository.load(dep)
        active_packages.append(package)

    # Check out all the sources into their respective 'src/' folders.
    try:
        src_dir = cache_abs('src')
        if os.path.exists(src_dir):
            raise ValidationError(
                "'src' directory already exists, did you have a previous build? " +
                "Currently all builds must be from scratch. Support should be " +
                "added for re-using a src directory when possible. src={}".format(src_dir))
        os.mkdir(src_dir)
        for src_name, fetcher in sorted(fetchers.items()):
            root = cache_abs('src/' + src_name)
            os.mkdir(root)

            fetcher.checkout_to(root)
    except ValidationError as ex:
        raise BuildError("Validation error when fetching sources for package: {}".format(ex))

    # Activate the packages so that we have a proper path, environment
    # variables.
    # TODO(cmaloney): RAII type thing for temporary directory so if we
    # don't get all the way through things will be cleaned up?
    install = Install(
        root=install_dir,
        config_dir=None,
        rooted_systemd=True,
        manage_systemd=False,
        block_systemd=True,
        fake_path=True,
        manage_users=False,
        manage_state_dir=False)
    install.activate(active_packages)
    # Rewrite all the symlinks inside the active path because we will
    # be mounting the folder into a docker container, and the absolute
    # paths to the packages will change.
    # TODO(cmaloney): This isn't very clean, it would be much nicer to
    # just run pkgpanda inside the package.
    rewrite_symlinks(install_dir, repository.path, "/opt/mesosphere/packages/")

    print("Building package in docker")

    # TODO(cmaloney): Run as a specific non-root user, make it possible
    # for non-root to cleanup afterwards.
    # Run the build, prepping the environment as necessary.
    mkdir(cache_abs("result"))

    # Copy the build info to the resulting tarball
    write_json(cache_abs("src/buildinfo.full.json"), final_buildinfo)
    write_json(cache_abs("result/buildinfo.full.json"), final_buildinfo)

    write_json(cache_abs("result/pkginfo.json"), pkginfo)

    # Make the folder for the package we are building. If docker does it, it
    # gets auto-created with root permissions and we can't actually delete it.
    os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))

    # TODO(cmaloney): Disallow writing to well known files and directories?
    # Source we checked out
    cmd.volumes.update({
        # TODO(cmaloney): src should be read only...
        cache_abs("src"): "/pkg/src:rw",
        # The build script
        build_script: "/pkg/build:ro",
        # Getting the result out
        cache_abs("result"): "/opt/mesosphere/packages/{}:rw".format(pkg_id),
        install_dir: "/opt/mesosphere:ro"
    })

    if os.path.exists(extra_dir):
        cmd.volumes[extra_dir] = "/pkg/extra:ro"

    cmd.environment = {
        "PKG_VERSION": version,
        "PKG_NAME": name,
        "PKG_ID": pkg_id,
        "PKG_PATH": "/opt/mesosphere/packages/{}".format(pkg_id),
        "PKG_VARIANT": variant if variant is not None else "<default>",
        "NUM_CORES": multiprocessing.cpu_count()
    }

    try:
        # TODO(cmaloney): Run a wrapper which sources
        # /opt/mesosphere/environment then runs a build. Also should fix
        # ownership of /opt/mesosphere/packages/{pkg_id} post build.
        cmd.run("package-builder", [
            "/bin/bash",
            "-o", "nounset",
            "-o", "pipefail",
            "-o", "errexit",
            "/pkg/build"])
    except CalledProcessError as ex:
        raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))

    # Clean up the temporary install dir used for dependencies.
    # TODO(cmaloney): Move to an RAII wrapper.
    check_call(['rm', '-rf', install_dir])

    print("Building package tarball")

    # Check for forbidden services before packaging the tarball:
    try:
        check_forbidden_services(cache_abs("result"), RESERVED_UNIT_NAMES)
    except ValidationError as ex:
        raise BuildError("Package validation failed: {}".format(ex))

    # TODO(cmaloney): Updating / filling last_build should be moved out of
    # the build function.
    write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))

    # Bundle the artifacts into the pkgpanda package
    tmp_name = pkg_path + "-tmp.tar.xz"
    make_tar(tmp_name, cache_abs("result"))
    os.rename(tmp_name, pkg_path)
    print("Package built.")
    if clean_after_build:
        clean()
    return pkg_path
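One detail worth calling out in the listing above is the make_tar / os.rename pairing: the tarball is written under a temporary name and only renamed to pkg_path once it is complete, so an interrupted build never leaves a half-written package at the final path. A minimal sketch of the same pattern (write_archive_atomically, payload_dir, and final_path are illustrative names, not pkgpanda APIs):

import os
import tarfile

def write_archive_atomically(payload_dir, final_path):
    # Build the archive under a temporary name first...
    tmp_path = final_path + "-tmp.tar.xz"
    with tarfile.open(tmp_path, "w:xz") as tar:
        tar.add(payload_dir, arcname=".")
    # ...then rename it into place, so a partially written archive is never
    # visible at final_path (rename is atomic within one filesystem).
    os.rename(tmp_path, final_path)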
Example #17
0
def generate(arguments,
             extra_templates=list(),
             extra_sources=list(),
             extra_targets=list()):
    # To maintain the old API where we passed arguments rather than the new name.
    user_arguments = arguments
    arguments = None

    sources, targets, templates = get_dcosconfig_source_target_and_templates(
        user_arguments, extra_templates, extra_sources)

    # TODO(cmaloney): Make it so we only get out the dcosconfig target arguments not all the config target arguments.
    resolver = gen.internals.resolve_configuration(sources,
                                                   targets + extra_targets)
    status = resolver.status_dict

    if status['status'] == 'errors':
        raise ValidationError(errors=status['errors'], unset=status['unset'])

    # Gather the late variables. The presence of late variables determines
    # whether or not a late package is created.
    late_variables = dict()
    # TODO(branden): Get the late vars and expressions from resolver.late
    for source in sources:
        for setter_list in source.setters.values():
            for setter in setter_list:
                if not setter.is_late:
                    continue

                if setter.name not in resolver.late:
                    continue

                # Skip late vars that aren't referenced by config.
                if not resolver.arguments[setter.name].is_finalized:
                    continue

                # Validate that a late variable only has one source.
                assert setter.name not in late_variables

                late_variables[setter.name] = setter.late_expression

    argument_dict = {
        k: v.value
        for k, v in resolver.arguments.items() if v.is_finalized
    }

    # expanded_config is a special result which contains all other arguments. It has to come after
    # the calculation of all the other arguments so it can be filled with everything which was
    # calculated. Can't be calculated because that would have an infinite recursion problem (the set
    # of all arguments would want to include itself).
    # Explicitly / manually set up so that it'll fit where we want it.
    # TODO(cmaloney): Make this late-bound by gen.internals
    argument_dict['expanded_config'] = textwrap.indent(
        json_prettyprint({
            k: v
            for k, v in argument_dict.items()
            if not v.startswith(gen.internals.LATE_BIND_PLACEHOLDER_START)
        }),
        prefix='  ' * 3,
    )
    log.debug("Final arguments:" + json_prettyprint(argument_dict))

    # Fill in the template parameters
    # TODO(cmaloney): render_templates should ideally take the template targets.
    rendered_templates = render_templates(templates, argument_dict)

    # Validate there aren't any unexpected top level directives in any of the files
    # (likely indicates a misspelling)
    for name, template in rendered_templates.items():
        if name == 'dcos-services.yaml':  # yaml list of the service files
            assert isinstance(template, list)
        elif name == 'cloud-config.yaml':
            assert template.keys() <= CLOUDCONFIG_KEYS, template.keys()
        elif isinstance(template, str):  # Not a yaml template
            pass
        else:  # yaml template file
            log.debug("validating template file %s", name)
            assert template.keys() <= PACKAGE_KEYS, template.keys()

    # Find all files which contain late bind variables and turn them into a "late bind package"
    # TODO(cmaloney): check there are no late bound variables in cloud-config.yaml
    late_files, regular_files = extract_files_containing_late_variables(
        rendered_templates['dcos-config.yaml']['package'])
    # put the regular files right back
    rendered_templates['dcos-config.yaml'] = {'package': regular_files}

    def make_package_filename(package_id, extension):
        return 'packages/{0}/{1}{2}'.format(package_id.name, repr(package_id),
                                            extension)

    # Render all the cluster packages
    cluster_package_info = {}

    # Prepare late binding config, if any.
    late_package = build_late_package(late_files, argument_dict['config_id'],
                                      argument_dict['provider'])
    if late_variables:
        # Render the late binding package. This package will be downloaded onto
        # each cluster node during bootstrap and rendered into the final config
        # using the values from the late config file.
        late_package_id = PackageId(late_package['name'])
        late_package_filename = make_package_filename(late_package_id,
                                                      '.dcos_config')
        os.makedirs(os.path.dirname(late_package_filename), mode=0o755)
        write_yaml(late_package_filename, {'package': late_package['package']},
                   default_flow_style=False)
        cluster_package_info[late_package_id.name] = {
            'id': late_package['name'],
            'filename': late_package_filename
        }

        # Add the late config file to cloud config. The expressions in
        # late_variables will be resolved by the service handling the cloud
        # config (e.g. Amazon CloudFormation). The rendered late config file
        # on a cluster node's filesystem will contain the final values.
        rendered_templates['cloud-config.yaml']['root'].append({
            'path': '/etc/mesosphere/setup-flags/late-config.yaml',
            'permissions': '0644',
            'owner': 'root',
            # TODO(cmaloney): don't prettyprint to save bytes.
            # NOTE: Use yaml here simply to make avoiding painful escaping and
            # unescaping easier.
            'content': render_yaml({
                'late_bound_package_id': late_package['name'],
                'bound_values': late_variables
            })
        })

    # Render the rest of the packages.
    for package_id_str in json.loads(argument_dict['cluster_packages']):
        package_id = PackageId(package_id_str)
        package_filename = make_package_filename(package_id, '.tar.xz')

        # Build the package
        do_gen_package(rendered_templates[package_id.name + '.yaml'],
                       package_filename)

        cluster_package_info[package_id.name] = {
            'id': package_id_str,
            'filename': package_filename
        }

    # Convert cloud-config to just contain write_files rather than root
    cc = rendered_templates['cloud-config.yaml']

    # Shouldn't contain any packages. Providers should pull what they need to
    # late bind out of other packages via cc_package_file.
    assert 'package' not in cc
    cc_root = cc.pop('root', [])
    # Make sure write_files exists.
    assert 'write_files' not in cc
    cc['write_files'] = []
    # Do the transform
    for item in cc_root:
        assert item['path'].startswith('/')
        cc['write_files'].append(item)
    rendered_templates['cloud-config.yaml'] = cc

    # Add in the add_services util. Done here instead of the initial
    # map since we need to bind in parameters
    def add_services(cloudconfig, cloud_init_implementation):
        return add_units(cloudconfig, rendered_templates['dcos-services.yaml'],
                         cloud_init_implementation)

    utils.add_services = add_services

    return Bunch({
        'arguments': argument_dict,
        'cluster_packages': cluster_package_info,
        'templates': rendered_templates,
        'utils': utils
    })
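The extract_files_containing_late_variables step above partitions the rendered dcos-config package files by whether their content still contains the late-bind placeholder; files with the placeholder go into the late package, the rest stay in dcos-config. A rough sketch of that kind of split (the placeholder string and split_late_files helper below are hypothetical; the real marker is gen.internals.LATE_BIND_PLACEHOLDER_START):

# Hypothetical marker for illustration; gen uses gen.internals.LATE_BIND_PLACEHOLDER_START.
PLACEHOLDER = "<<LATE_BIND:"

def split_late_files(package_files):
    """Partition file entries into (late_files, regular_files)."""
    late_files, regular_files = [], []
    for entry in package_files:
        if PLACEHOLDER in entry.get("content", ""):
            late_files.append(entry)
        else:
            regular_files.append(entry)
    return late_files, regular_files

files = [
    {"path": "/etc/static.conf", "content": "port: 8080"},
    {"path": "/etc/late.conf", "content": "token: <<LATE_BIND:auth_token>>"},
]
late, regular = split_late_files(files)  # late gets only /etc/late.conf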
Example #18
0
def build(package_store, name, variant, clean_after_build, recursive=False):
    assert isinstance(package_store, PackageStore)
    print("Building package {} variant {}".format(
        name, pkgpanda.util.variant_str(variant)))
    tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
    repository = Repository(tmpdir.name)

    package_dir = package_store.get_package_folder(name)

    def src_abs(name):
        return package_dir + '/' + name

    def cache_abs(filename):
        return package_store.get_package_cache_folder(name) + '/' + filename

    # Build pkginfo over time, translating fields from buildinfo.
    pkginfo = {}

    # Build up the docker command arguments over time, translating fields as needed.
    cmd = DockerCmd()

    assert (name, variant) in package_store.packages, \
        "Programming error: name, variant should have been validated to be valid before calling build()."
    buildinfo = copy.deepcopy(package_store.get_buildinfo(name, variant))

    if 'name' in buildinfo:
        raise BuildError(
            "'name' is not allowed in buildinfo.json, it is implicitly the name of the "
            "folder containing the buildinfo.json")

    # Convert single_source -> sources
    try:
        sources = expand_single_source_alias(name, buildinfo)
    except ValidationError as ex:
        raise BuildError(
            "Invalid buildinfo.json for package: {}".format(ex)) from ex

    # Save the final sources back into buildinfo so it gets written into
    # buildinfo.json. This also means buildinfo.json is always expanded form.
    buildinfo['sources'] = sources

    # Construct the source fetchers, gather the checkout ids from them
    checkout_ids = dict()
    fetchers = dict()
    try:
        for src_name, src_info in sorted(sources.items()):
            # TODO(cmaloney): Switch to a unified top level cache directory shared by all packages
            cache_dir = package_store.get_package_cache_folder(
                name) + '/' + src_name
            check_call(['mkdir', '-p', cache_dir])
            fetcher = get_src_fetcher(src_info, cache_dir, package_dir)
            fetchers[src_name] = fetcher
            checkout_ids[src_name] = fetcher.get_id()
    except ValidationError as ex:
        raise BuildError(
            "Validation error when fetching sources for package: {}".format(
                ex))

    for src_name, checkout_id in checkout_ids.items():
        # NOTE: single_source buildinfo was expanded above so the src_name is
        # always correct here.
        # Make sure we never accidentally overwrite something which might be
        # important. Fields should match if specified (And that should be
        # tested at some point). For now disallowing identical saves hassle.
        assert_no_duplicate_keys(checkout_id, buildinfo['sources'][src_name])
        buildinfo['sources'][src_name].update(checkout_id)

    # Add the sha1 of the buildinfo.json + build file to the build ids
    build_ids = {"sources": checkout_ids}
    build_ids['build'] = pkgpanda.util.sha1(src_abs(buildinfo['build_script']))
    build_ids['pkgpanda_version'] = pkgpanda.build.constants.version
    build_ids['variant'] = '' if variant is None else variant

    extra_dir = src_abs("extra")
    # Add the "extra" folder inside the package as an additional source if it
    # exists
    if os.path.exists(extra_dir):
        extra_id = hash_folder(extra_dir)
        build_ids['extra_source'] = extra_id
        buildinfo['extra_source'] = extra_id

    # Figure out the docker name.
    docker_name = buildinfo['docker']
    cmd.container = docker_name

    # Add the id of the docker build environment to the build_ids.
    try:
        docker_id = get_docker_id(docker_name)
    except CalledProcessError:
        # docker pull the container and try again
        check_call(['docker', 'pull', docker_name])
        docker_id = get_docker_id(docker_name)

    build_ids['docker'] = docker_id

    # TODO(cmaloney): The environment variables should be generated during build
    # not live in buildinfo.json.
    build_ids['environment'] = buildinfo['environment']

    # Packages need directories inside the fake install root (otherwise docker
    # will try making the directories on a readonly filesystem), so build the
    # install root now, and make the package directories in it as we go.
    install_dir = tempfile.mkdtemp(prefix="pkgpanda-")

    active_packages = list()
    active_package_ids = set()
    active_package_variants = dict()
    auto_deps = set()
    # Verify all requires are in the repository.
    if 'requires' in buildinfo:
        # Final package has the same requires as the build.
        pkginfo['requires'] = buildinfo['requires']

        # TODO(cmaloney): Pull generating the full set of requires into a function.
        to_check = copy.deepcopy(buildinfo['requires'])
        if type(to_check) != list:
            raise BuildError(
                "`requires` in buildinfo.json must be an array of dependencies."
            )
        while to_check:
            requires_info = to_check.pop(0)
            requires_name, requires_variant = expand_require(requires_info)

            if requires_name in active_package_variants:
                # TODO(cmaloney): If one package depends on the <default>
                # variant of a package and 1+ others depends on a non-<default>
                # variant then update the dependency to the non-default variant
                # rather than erroring.
                if requires_variant != active_package_variants[requires_name]:
                    # TODO(cmaloney): Make this contain the chains of
                    # dependencies which contain the conflicting packages.
                    # a -> b -> c -> d {foo}
                    # e {bar} -> d {baz}
                    raise BuildError(
                        "Dependncy on multiple variants of the same package {}. "
                        "variants: {} {}".format(
                            requires_name, requires_variant,
                            active_package_variants[requires_name]))

                # The package {requires_name, variant} is already a dependency;
                # don't process it again / move on to the next.
                continue

            active_package_variants[requires_name] = requires_variant

            # Figure out the last build of the dependency, add that as the
            # fully expanded dependency.
            requires_last_build = package_store.get_last_build_filename(
                requires_name, requires_variant)
            if not os.path.exists(requires_last_build):
                if recursive:
                    # Build the dependency
                    build(package_store, requires_name, requires_variant,
                          clean_after_build, recursive)
                else:
                    raise BuildError(
                        "No last build file found for dependency {} variant {}. Rebuild "
                        "the dependency".format(requires_name,
                                                requires_variant))

            try:
                pkg_id_str = load_string(requires_last_build)
                auto_deps.add(pkg_id_str)
                pkg_buildinfo = package_store.get_buildinfo(
                    requires_name, requires_variant)
                pkg_requires = pkg_buildinfo['requires']
                pkg_path = repository.package_path(pkg_id_str)
                pkg_tar = pkg_id_str + '.tar.xz'
                if not os.path.exists(
                        package_store.get_package_cache_folder(requires_name) +
                        '/' + pkg_tar):
                    raise BuildError(
                        "The build tarball {} refered to by the last_build file of the "
                        "dependency {} variant {} doesn't exist. Rebuild the dependency."
                        .format(pkg_tar, requires_name, requires_variant))

                active_package_ids.add(pkg_id_str)

                # Mount the package into the docker container.
                cmd.volumes[pkg_path] = "/opt/mesosphere/packages/{}:ro".format(pkg_id_str)
                os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))

                # Add the dependencies of the package to the set which will be
                # activated.
                # TODO(cmaloney): All these 'transitive' dependencies shouldn't
                # be available to the package being built, only what depends on
                # them directly.
                to_check += pkg_requires
            except ValidationError as ex:
                raise BuildError(
                    "validating package needed as dependency {0}: {1}".format(
                        requires_name, ex)) from ex
            except PackageError as ex:
                raise BuildError(
                    "loading package needed as dependency {0}: {1}".format(
                        requires_name, ex)) from ex

    # Add requires to the package id, calculate the final package id.
    # NOTE: active_packages isn't fully constructed here since we lazily load
    # packages not already in the repository.
    build_ids['requires'] = list(active_package_ids)
    version_base = hash_checkout(build_ids)
    version = None
    if "version_extra" in buildinfo:
        version = "{0}-{1}".format(buildinfo["version_extra"], version_base)
    else:
        version = version_base
    pkg_id = PackageId.from_parts(name, version)

    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
    buildinfo['build_ids'] = build_ids
    buildinfo['package_version'] = version

    # Save the package name and variant. The variant is used when installing
    # packages to validate dependencies.
    buildinfo['name'] = name
    buildinfo['variant'] = variant

    # If the package is already built, don't do anything.
    pkg_path = package_store.get_package_cache_folder(
        name) + '/{}.tar.xz'.format(pkg_id)

    # Done if it exists locally
    if exists(pkg_path):
        print("Package up to date. Not re-building.")

        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant),
                     str(pkg_id))

        return pkg_path

    # Try downloading.
    dl_path = package_store.try_fetch_by_id(pkg_id)
    if dl_path:
        print(
            "Package up to date. Not re-building. Downloaded from repository-url."
        )
        # TODO(cmaloney): Updating / filling last_build should be moved out of
        # the build function.
        write_string(package_store.get_last_build_filename(name, variant),
                     str(pkg_id))
        print(dl_path, pkg_path)
        assert dl_path == pkg_path
        return pkg_path

    # Fall out and do the build since it couldn't be downloaded
    print("Unable to download from cache. Proceeding to build")

    print("Building package {} with buildinfo: {}".format(
        pkg_id, json.dumps(buildinfo, indent=2, sort_keys=True)))

    # Clean out src, result so later steps can use them freely for building.
    def clean():
        # Run a docker container to remove src/ and result/
        cmd = DockerCmd()
        cmd.volumes = {
            package_store.get_package_cache_folder(name): "/pkg/:rw",
        }
        cmd.container = "ubuntu:14.04.4"
        cmd.run(["rm", "-rf", "/pkg/src", "/pkg/result"])

    clean()

    # Only fresh builds are allowed which don't overlap existing artifacts.
    result_dir = cache_abs("result")
    if exists(result_dir):
        raise BuildError(
            "result folder must not exist. It will be made when the package is "
            "built. {}".format(result_dir))

    # 'mkpanda add' all implicit dependencies since we actually need to build.
    for dep in auto_deps:
        print("Auto-adding dependency: {}".format(dep))
        # NOTE: Not using the name pkg_id because that overrides the outer one.
        id_obj = PackageId(dep)
        add_package_file(repository, package_store.get_package_path(id_obj))
        package = repository.load(dep)
        active_packages.append(package)

    # Check out all the sources into their respective 'src/' folders.
    try:
        src_dir = cache_abs('src')
        if os.path.exists(src_dir):
            raise ValidationError(
                "'src' directory already exists, did you have a previous build? "
                "Currently all builds must be from scratch. Support should be "
                "added for re-using a src directory when possible. src={}".format(src_dir))
        os.mkdir(src_dir)
        for src_name, fetcher in sorted(fetchers.items()):
            root = cache_abs('src/' + src_name)
            os.mkdir(root)

            fetcher.checkout_to(root)
    except ValidationError as ex:
        raise BuildError(
            "Validation error when fetching sources for package: {}".format(
                ex))

    # Copy over environment settings
    pkginfo['environment'] = buildinfo['environment']

    # Whether pkgpanda should make sure a `/var/lib` state directory is available on the host
    pkginfo['state_directory'] = buildinfo.get('state_directory', False)
    if pkginfo['state_directory'] not in [True, False]:
        raise BuildError(
            "state_directory in buildinfo.json must be a boolean `true` or `false`"
        )

    username = buildinfo.get('username')
    if not (username is None or isinstance(username, str)):
        raise BuildError(
            "username in buildinfo.json must be either not set (no user for this"
            " package), or a user name string")
    if username:
        try:
            pkgpanda.UserManagement.validate_username(username)
        except ValidationError as ex:
            raise BuildError(
                "username in buildinfo.json didn't meet the validation rules. {}"
                .format(ex))
    pkginfo['username'] = username

    # Activate the packages so that we have a proper path, environment
    # variables.
    # TODO(cmaloney): RAII type thing for temporary directory so if we
    # don't get all the way through things will be cleaned up?
    install = Install(root=install_dir,
                      config_dir=None,
                      rooted_systemd=True,
                      manage_systemd=False,
                      block_systemd=True,
                      fake_path=True,
                      manage_users=False,
                      manage_state_dir=False)
    install.activate(active_packages)
    # Rewrite all the symlinks inside the active path because we will
    # be mounting the folder into a docker container, and the absolute
    # paths to the packages will change.
    # TODO(cmaloney): This isn't very clean, it would be much nicer to
    # just run pkgpanda inside the package.
    rewrite_symlinks(install_dir, repository.path, "/opt/mesosphere/packages/")

    print("Building package in docker")

    # TODO(cmaloney): Run as a specific non-root user, make it possible
    # for non-root to cleanup afterwards.
    # Run the build, prepping the environment as necessary.
    mkdir(cache_abs("result"))

    # Copy the build info to the resulting tarball
    write_json(cache_abs("src/buildinfo.full.json"), buildinfo)
    write_json(cache_abs("result/buildinfo.full.json"), buildinfo)

    write_json(cache_abs("result/pkginfo.json"), pkginfo)

    # Make the folder for the package we are building. If docker does it, it
    # gets auto-created with root permissions and we can't actually delete it.
    os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))

    # TODO(cmaloney): Disallow writing to well known files and directories?
    # Source we checked out
    cmd.volumes.update({
        # TODO(cmaloney): src should be read only...
        cache_abs("src"): "/pkg/src:rw",
        # The build script
        src_abs(buildinfo['build_script']): "/pkg/build:ro",
        # Getting the result out
        cache_abs("result"): "/opt/mesosphere/packages/{}:rw".format(pkg_id),
        install_dir: "/opt/mesosphere:ro"
    })

    if os.path.exists(extra_dir):
        cmd.volumes[extra_dir] = "/pkg/extra:ro"

    cmd.environment = {
        "PKG_VERSION": version,
        "PKG_NAME": name,
        "PKG_ID": pkg_id,
        "PKG_PATH": "/opt/mesosphere/packages/{}".format(pkg_id),
        "PKG_VARIANT": variant if variant is not None else "<default>"
    }

    try:
        # TODO(cmaloney): Run a wrapper which sources
        # /opt/mesosphere/environment then runs a build. Also should fix
        # ownership of /opt/mesosphere/packages/{pkg_id} post build.
        cmd.run([
            "/bin/bash", "-o", "nounset", "-o", "pipefail", "-o", "errexit",
            "/pkg/build"
        ])
    except CalledProcessError as ex:
        raise BuildError("docker exited non-zero: {}\nCommand: {}".format(
            ex.returncode, ' '.join(ex.cmd)))

    # Clean up the temporary install dir used for dependencies.
    # TODO(cmaloney): Move to an RAII wrapper.
    check_call(['rm', '-rf', install_dir])

    print("Building package tarball")

    # Check for forbidden services before packaging the tarball:
    try:
        check_forbidden_services(cache_abs("result"), RESERVED_UNIT_NAMES)
    except ValidationError as ex:
        raise BuildError("Package validation failed: {}".format(ex))

    # TODO(cmaloney): Updating / filling last_build should be moved out of
    # the build function.
    write_string(package_store.get_last_build_filename(name, variant),
                 str(pkg_id))

    # Bundle the artifacts into the pkgpanda package
    tmp_name = pkg_path + "-tmp.tar.xz"
    make_tar(tmp_name, cache_abs("result"))
    os.rename(tmp_name, pkg_path)
    print("Package built.")
    if clean_after_build:
        clean()
    return pkg_path
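The package version above is derived from hash_checkout(build_ids), i.e. a deterministic digest of everything that influences the build: the source checkout ids, the build script's sha1, the pkgpanda version, the variant, the docker image id, the environment, and the resolved requires. The exact canonicalization lives in pkgpanda, but the idea is roughly the following (a sketch only; hash_dict_sketch is not the real hash_checkout):

import hashlib
import json

def hash_dict_sketch(data):
    # Serialize with sorted keys so the same logical content always maps
    # to the same byte string, then hash that string.
    canonical = json.dumps(data, sort_keys=True, separators=(",", ":"))
    return hashlib.sha1(canonical.encode("utf-8")).hexdigest()

build_ids = {
    "sources": {"src": {"commit": "0123abcd"}},
    "build": "sha1-of-build-script",
    "variant": "",
    "docker": "sha256-of-image",
}
version_base = hash_dict_sketch(build_ids)  # stable across identical inputs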
Example #19
0
File: cli.py Project: yonglehou/dcos
def main():
    arguments = docopt(__doc__, version="Pkgpanda Package Manager")
    umask(0o022)

    # NOTE: Changing root or repository will likely break actually running packages.
    install = Install(os.path.abspath(arguments['--root']),
                      os.path.abspath(arguments['--config-dir']),
                      arguments['--rooted-systemd'],
                      not arguments['--no-systemd'],
                      not arguments['--no-block-systemd'])
    repository = Repository(os.path.abspath(arguments['--repository']))

    if arguments['setup']:
        try:
            setup(install, repository)
        except ValidationError as ex:
            print("Validation Error: {0}".format(ex))
            sys.exit(1)
        sys.exit(0)

    if arguments['list']:
        print_repo_list(repository.list())
        sys.exit(0)

    if arguments['active']:
        for pkg in sorted(install.get_active()):
            print(pkg)
        sys.exit(0)

    if arguments['add']:
        add_to_repository(repository, arguments['<package-tarball>'])
        sys.exit(0)

    if arguments['fetch']:

        def fetcher(id, target):
            return requests_fetcher(arguments['--repository-url'], id, target,
                                    os.getcwd())

        for pkg_id in arguments['<id>']:
            # TODO(cmaloney): Make this not use escape sequences when not at a
            # `real` terminal.
            sys.stdout.write("\rFetching: {0}".format(pkg_id))
            sys.stdout.flush()
            try:
                repository.add(fetcher, pkg_id)
            except FetchError as ex:
                print("\nUnable to fetch package {0}: {1}".format(pkg_id, ex))
                sys.exit(1)
            sys.stdout.write("\rFetched: {0}\n".format(pkg_id))
            sys.stdout.flush()

        sys.exit(0)

    if arguments['activate']:
        do_activate(install, repository, arguments['<id>'],
                    arguments['--no-systemd'], arguments['--no-block-systemd'])
        sys.exit(0)

    if arguments['swap']:
        active = install.get_active()
        # TODO(cmaloney): I guarantee there is a better way to write this and
        # I've written the same logic before...
        packages_by_name = dict()
        for id_str in active:
            pkg_id = PackageId(id_str)
            packages_by_name[pkg_id.name] = pkg_id

        new_id = PackageId(arguments['<package-id>'])
        if new_id.name not in packages_by_name:
            print(
                "ERROR: No package with name {} currently active to swap with."
                .format(new_id.name))
            sys.exit(1)

        packages_by_name[new_id.name] = new_id
        new_active = list(map(str, packages_by_name.values()))
        # Activate with the new package name
        do_activate(install, repository, new_active, arguments['--no-systemd'],
                    arguments['--no-block-systemd'])
        sys.exit(0)

    if arguments['remove']:
        # Make sure none of the packages are active
        active_packages = install.get_active()
        active = active_packages.intersection(set(arguments['<id>']))
        if len(active) > 0:
            print("Refusing to remove active packages {0}".format(" ".join(
                sorted(list(active)))))
            sys.exit(1)

        for pkg_id in arguments['<id>']:
            sys.stdout.write("\rRemoving: {0}".format(pkg_id))
            sys.stdout.flush()
            try:
                # Validate package id, that package is installed.
                PackageId(pkg_id)
                repository.remove(pkg_id)
            except ValidationError:
                print("\nInvalid package id {0}".format(pkg_id))
                sys.exit(1)
            except OSError as ex:
                print("\nError removing package {0}".format(pkg_id))
                print(ex)
                sys.exit(1)
            sys.stdout.write("\rRemoved: {0}\n".format(pkg_id))
            sys.stdout.flush()
        sys.exit(0)

    if arguments['uninstall']:
        uninstall(install, repository)
        sys.exit(0)

    if arguments['check']:
        checks = find_checks(install, repository)
        if arguments['--list']:
            list_checks(checks)
            sys.exit(0)
        # Run all checks
        sys.exit(run_checks(checks, install, repository))

    print("unknown command")
    sys.exit(1)
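main() above is a typical docopt dispatcher: the module docstring is the usage specification, docopt() parses sys.argv against it, and the returned dict of flags and positionals drives a chain of command checks, each ending in sys.exit(). A self-contained sketch of the same pattern (the usage text and commands are invented for illustration, not pkgpanda's actual CLI):

"""Usage:
  mytool list
  mytool add <tarball>
  mytool (-h | --help)
"""
import sys
from docopt import docopt

def main():
    arguments = docopt(__doc__, version="mytool 0.1")
    if arguments["list"]:
        print("listing packages...")
        sys.exit(0)
    if arguments["add"]:
        print("adding", arguments["<tarball>"])
        sys.exit(0)
    print("unknown command")
    sys.exit(1)

if __name__ == "__main__":
    main()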
Example #20
0
def generate(arguments,
             extra_templates=list(),
             cc_package_files=list(),
             validate_only=False):
    log.info("Generating configuration files...")

    assert isinstance(extra_templates, list)

    # To maintain the old API where we passed arguments rather than the new name.
    user_arguments = arguments
    arguments = None

    setters = dict()
    validate = list()

    # Make sure all user provided arguments are strings.
    validate_arguments_strings(user_arguments)

    # TODO(cmaloney): Make these all just defined by the base calc.py
    package_names = ['dcos-config', 'dcos-metadata']
    template_filenames = [
        'dcos-config.yaml', 'cloud-config.yaml', 'dcos-metadata.yaml',
        'dcos-services.yaml'
    ]

    # TODO(cmaloney): Check there are no duplicates between templates and extra_template_files
    template_filenames += extra_templates

    def add_setter(name, value, is_optional, conditions, is_user,
                   replace_existing):
        if replace_existing:
            if name in setters:
                del setters[name]
        setters.setdefault(name, list()).append(
            Setter(name, value, is_optional, conditions, is_user))

    def add_conditional_scope(scope, conditions, replace_existing):
        nonlocal validate

        # TODO(cmaloney): 'defaults' are the same as 'can' and 'must' is identical to 'arguments' except
        # that one takes functions and one takes strings. Simplify to just 'can', 'must'.
        assert scope.keys() <= {'validate', 'default', 'must', 'conditional'}

        validate += scope.get('validate', list())

        for name, fn in scope.get('must', dict()).items():
            add_setter(name, fn, False, conditions, False, replace_existing)

        for name, fn in scope.get('default', dict()).items():
            add_setter(name, fn, True, conditions, False, replace_existing)

        for name, cond_options in scope.get('conditional', dict()).items():
            for value, sub_scope in cond_options.items():
                add_conditional_scope(sub_scope,
                                      conditions + [(name, value)],
                                      replace_existing=replace_existing)

    add_conditional_scope(gen.calc.entry, [], replace_existing=False)

    # Allow overriding calculators with a `gen_extra/calc.py` if it exists
    if os.path.exists('gen_extra/calc.py'):
        mod = importlib.machinery.SourceFileLoader(
            'gen_extra.calc', 'gen_extra/calc.py').load_module()
        add_conditional_scope(mod.entry, [], replace_existing=True)

    # Add in all user arguments as setters.
    # Happens last so that they are never overwritten with replace_existing=True
    for name, value in user_arguments.items():
        add_setter(name, value, False, [], True, False)

    # Re-arrange templates to be indexed by common name. Only allow multiple for one key if the key
    # is yaml (ends in .yaml).
    templates = dict()
    for filename in template_filenames:
        key = os.path.basename(filename)
        templates.setdefault(key, list())
        templates[key].append(filename)

        if len(templates[key]) > 1 and not key.endswith('.yaml'):
            raise Exception(
                "Internal Error: Only know how to merge YAML templates at this point in time. "
                "Can't merge template {} in template_list {}".format(
                    filename, templates[key]))

    mandatory_parameters = get_parameters(templates)

    validate_all_arguments_match_parameters(mandatory_parameters, setters,
                                            user_arguments)

    def add_builtin(name, value):
        add_setter(name, json.dumps(value, **json_prettyprint_args), False, [],
                   False, False)

    # TODO(cmaloney): Hash the contents of all the templates rather than using the list of filenames
    # since the filenames might not live in this git repo, or may be locally modified.
    add_builtin('template_filenames', template_filenames)
    add_builtin('package_names', list(package_names))
    add_builtin('user_arguments', user_arguments)

    # Add a builtin for expanded_config, so that we won't get unset argument errors. The temporary
    # value will get replaced with the set of all arguments once calculation is complete
    temporary_str = 'DO NOT USE THIS AS AN ARGUMENT TO OTHER ARGUMENTS. IT IS TEMPORARY'
    add_builtin('expanded_config', temporary_str)

    # Calculate the remaining arguments.
    arguments = DFSArgumentCalculator(setters,
                                      validate).calculate(mandatory_parameters)

    # Validate all new / calculated arguments are strings.
    validate_arguments_strings(arguments)

    log.info("Final arguments:" +
             json.dumps(arguments, **json_prettyprint_args))

    # expanded_config is a special result which contains all other arguments. It has to come after
    # the calculation of all the other arguments so it can be filled with everything which was
    # calculated. Can't be calculated because that would have an infinite recursion problem (the set
    # of all arguments would want to include itself).
    # Explicitly / manually set up so that it'll fit where we want it.
    arguments['expanded_config'] = textwrap.indent(
        json.dumps(arguments, **json_prettyprint_args), prefix='  ' * 3)

    if validate_only:
        return

    # Fill in the template parameters
    rendered_templates = render_templates(templates, arguments)

    # Validate there aren't any unexpected top level directives in any of the files
    # (likely indicates a misspelling)
    for name, template in rendered_templates.items():
        if name == 'dcos-services.yaml':  # yaml list of the service files
            assert isinstance(template, list)
        elif name == 'cloud-config.yaml':
            assert template.keys() <= CLOUDCONFIG_KEYS, template.keys()
        elif isinstance(template, str):  # Not a yaml template
            pass
        else:  # yaml template file
            log.debug("validating template file %s", name)
            assert template.keys() <= PACKAGE_KEYS, template.keys()

    # Extract cc_package_files out of the dcos-config template and put them into
    # the cloud-config package.
    cc_package_files, dcos_config_files = extract_files_with_path(
        rendered_templates['dcos-config.yaml']['package'], cc_package_files)
    rendered_templates['dcos-config.yaml'] = {'package': dcos_config_files}

    # Add an empty pkginfo.json to the cc_package_files.
    # Also assert there isn't one already (can only write out a file once).
    for item in cc_package_files:
        assert item['path'] != '/pkginfo.json'

    # If there aren't any files for a cloud-config package, don't bring one into
    # existence by adding a pkginfo.json
    if len(cc_package_files) > 0:
        cc_package_files.append({"path": "/pkginfo.json", "content": "{}"})

    for item in cc_package_files:
        assert item['path'].startswith('/')
        item['path'] = '/etc/mesosphere/setup-packages/dcos-provider-{}--setup'.format(
            arguments['provider']) + item['path']
        rendered_templates['cloud-config.yaml']['root'].append(item)

    cluster_package_info = {}

    # Render all the cluster packages
    for package_id_str in json.loads(arguments['cluster_packages']):
        package_id = PackageId(package_id_str)
        package_filename = 'packages/{}/{}.tar.xz'.format(
            package_id.name, package_id_str)

        # Build the package
        do_gen_package(rendered_templates[package_id.name + '.yaml'],
                       package_filename)

        cluster_package_info[package_id.name] = {
            'id': package_id_str,
            'filename': package_filename
        }

    # Convert cloud-config to just contain write_files rather than root
    cc = rendered_templates['cloud-config.yaml']

    # Shouldn't contain any packages. Providers should pull what they need to
    # late bind out of other packages via cc_package_file.
    assert 'package' not in cc
    cc_root = cc.pop('root', [])
    # Make sure write_files exists.
    assert 'write_files' not in cc
    cc['write_files'] = []
    # Do the transform
    for item in cc_root:
        assert item['path'].startswith('/')
        cc['write_files'].append(item)
    rendered_templates['cloud-config.yaml'] = cc

    # Add in the add_services util. Done here instead of the initial
    # map since we need to bind in parameters
    def add_services(cloudconfig, cloud_init_implementation):
        return add_units(cloudconfig, rendered_templates['dcos-services.yaml'],
                         cloud_init_implementation)

    utils.add_services = add_services

    return Bunch({
        'arguments': arguments,
        'cluster_packages': cluster_package_info,
        'templates': rendered_templates,
        'utils': utils
    })
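The cloud-config conversion near the end of generate() (pop 'root', check every path is absolute, append each entry to 'write_files') is compact enough to show on its own. A standalone sketch of that transform (root_to_write_files is an illustrative name):

def root_to_write_files(cloud_config):
    # Providers consume files under 'write_files'; anything staged under
    # 'root' is moved over after checking the target path is absolute.
    assert 'package' not in cloud_config
    assert 'write_files' not in cloud_config
    cloud_config['write_files'] = []
    for item in cloud_config.pop('root', []):
        assert item['path'].startswith('/')
        cloud_config['write_files'].append(item)
    return cloud_config

cc = {'root': [{'path': '/etc/motd', 'content': 'hello'}]}
root_to_write_files(cc)  # cc now has 'write_files' and no 'root'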
Example #21
0
def generate(
        arguments,
        extra_templates=list(),
        cc_package_files=list()):
    # To maintain the old API where we passed arguments rather than the new name.
    user_arguments = arguments
    arguments = None

    sources, targets, templates = get_dcosconfig_source_target_and_templates(user_arguments, extra_templates)

    # TODO(cmaloney): Make it so we only get out the dcosconfig target arguments not all the config target arguments.
    resolver = gen.internals.resolve_configuration(sources, targets, user_arguments)
    status = resolver.status_dict

    if status['status'] == 'errors':
        raise ValidationError(errors=status['errors'], unset=status['unset'])

    argument_dict = {k: v.value for k, v in resolver.arguments.items()}
    log.debug("Final arguments:" + json_prettyprint(argument_dict))

    # expanded_config is a special result which contains all other arguments. It has to come after
    # the calculation of all the other arguments so it can be filled with everything which was
    # calculated. Can't be calculated because that would have an infinite recursion problem (the set
    # of all arguments would want to include itself).
    # Explicitly / manually set up so that it'll fit where we want it.
    # TODO(cmaloney): Make this late-bound by gen.internals
    argument_dict['expanded_config'] = textwrap.indent(json_prettyprint(argument_dict), prefix='  ' * 3)

    # Fill in the template parameters
    # TODO(cmaloney): render_templates should ideally take the template targets.
    rendered_templates = render_templates(templates, argument_dict)

    # Validate there aren't any unexpected top level directives in any of the files
    # (likely indicates a misspelling)
    for name, template in rendered_templates.items():
        if name == 'dcos-services.yaml':  # yaml list of the service files
            assert isinstance(template, list)
        elif name == 'cloud-config.yaml':
            assert template.keys() <= CLOUDCONFIG_KEYS, template.keys()
        elif isinstance(template, str):  # Not a yaml template
            pass
        else:  # yaml template file
            log.debug("validating template file %s", name)
            assert template.keys() <= PACKAGE_KEYS, template.keys()

    # Extract cc_package_files out of the dcos-config template and put them into
    # the cloud-config package.
    cc_package_files, dcos_config_files = extract_files_with_path(rendered_templates['dcos-config.yaml']['package'],
                                                                  cc_package_files)
    rendered_templates['dcos-config.yaml'] = {'package': dcos_config_files}

    # Add an empty pkginfo.json to the cc_package_files.
    # Also assert there isn't one already (can only write out a file once).
    for item in cc_package_files:
        assert item['path'] != '/pkginfo.json'

    # If there aren't any files for a cloud-config package, don't bring one into
    # existence by adding a pkginfo.json
    if len(cc_package_files) > 0:
        cc_package_files.append({
            "path": "/pkginfo.json",
            "content": "{}"})

    for item in cc_package_files:
        assert item['path'].startswith('/')
        item['path'] = '/etc/mesosphere/setup-packages/dcos-provider-{}--setup'.format(
            argument_dict['provider']) + item['path']
        rendered_templates['cloud-config.yaml']['root'].append(item)

    cluster_package_info = {}

    # Render all the cluster packages
    for package_id_str in json.loads(argument_dict['cluster_packages']):
        package_id = PackageId(package_id_str)
        package_filename = 'packages/{}/{}.tar.xz'.format(
            package_id.name,
            package_id_str)

        # Build the package
        do_gen_package(rendered_templates[package_id.name + '.yaml'], package_filename)

        cluster_package_info[package_id.name] = {
            'id': package_id_str,
            'filename': package_filename
        }

    # Convert cloud-config to just contain write_files rather than root
    cc = rendered_templates['cloud-config.yaml']

    # Shouldn't contain any packages. Providers should pull what they need to
    # late bind out of other packages via cc_package_file.
    assert 'package' not in cc
    cc_root = cc.pop('root', [])
    # Make sure write_files exists.
    assert 'write_files' not in cc
    cc['write_files'] = []
    # Do the transform
    for item in cc_root:
        assert item['path'].startswith('/')
        cc['write_files'].append(item)
    rendered_templates['cloud-config.yaml'] = cc

    # Add in the add_services util. Done here instead of the initial
    # map since we need to bind in parameters
    def add_services(cloudconfig, cloud_init_implementation):
        return add_units(cloudconfig, rendered_templates['dcos-services.yaml'], cloud_init_implementation)

    utils.add_services = add_services

    return Bunch({
        'arguments': argument_dict,
        'cluster_packages': cluster_package_info,
        'templates': rendered_templates,
        'utils': utils
    })
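The expanded_config value above is the pretty-printed argument dict indented by '  ' * 3, presumably so the JSON blob lines up when it is substituted into a YAML template three two-space levels deep. The mechanics are just textwrap.indent:

import json
import textwrap

config = {"provider": "onprem", "num_masters": "3"}
blob = json.dumps(config, indent=2, sort_keys=True)
# Prefix every line with six spaces so the block nests correctly when
# dropped into a YAML document at that indentation level (assumed intent).
print(textwrap.indent(blob, prefix='  ' * 3))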
Example #22
0
def generate(arguments,
             extra_templates=list(),
             extra_sources=list(),
             extra_targets=list()):
    # To maintain the old API where we passed arguments rather than the new name.
    user_arguments = arguments
    arguments = None

    sources, targets, templates = get_dcosconfig_source_target_and_templates(
        user_arguments, extra_templates, extra_sources)

    resolver = validate_and_raise(sources, targets + extra_targets)
    argument_dict = get_final_arguments(resolver)
    late_variables = get_late_variables(resolver, sources)
    secret_builtins = [
        'expanded_config_full', 'user_arguments_full', 'config_yaml_full'
    ]
    secret_variables = set(get_secret_variables(sources) + secret_builtins)
    masked_value = '**HIDDEN**'

    # Calculate values for builtin variables.
    user_arguments_masked = {
        k: (masked_value if k in secret_variables else v)
        for k, v in user_arguments.items()
    }
    argument_dict['user_arguments_full'] = json_prettyprint(user_arguments)
    argument_dict['user_arguments'] = json_prettyprint(user_arguments_masked)
    argument_dict['config_yaml_full'] = user_arguments_to_yaml(user_arguments)
    argument_dict['config_yaml'] = user_arguments_to_yaml(
        user_arguments_masked)

    # The expanded_config and expanded_config_full variables contain all other variables and their values.
    # expanded_config is a copy of expanded_config_full with secret values removed. Calculating these variables' values
    # must come after the calculation of all other variables to prevent infinite recursion.
    # TODO(cmaloney): Make this late-bound by gen.internals
    expanded_config_full = {
        k: v
        for k, v in argument_dict.items()
        # Omit late-bound variables whose values have not yet been calculated.
        if not v.startswith(gen.internals.LATE_BIND_PLACEHOLDER_START)
    }
    expanded_config_scrubbed = {
        k: v
        for k, v in expanded_config_full.items() if k not in secret_variables
    }
    argument_dict['expanded_config_full'] = format_expanded_config(
        expanded_config_full)
    argument_dict['expanded_config'] = format_expanded_config(
        expanded_config_scrubbed)

    log.debug("Final arguments:" + json_prettyprint({
        # Mask secret config values.
        k: (masked_value if k in secret_variables else v)
        for k, v in argument_dict.items()
    }))

    # Fill in the template parameters
    # TODO(cmaloney): render_templates should ideally take the template targets.
    rendered_templates = render_templates(templates, argument_dict)

    # Validate there aren't any unexpected top level directives in any of the files
    # (likely indicates a misspelling)
    for name, template in rendered_templates.items():
        if name == 'dcos-services.yaml':  # yaml list of the service files
            assert isinstance(template, list)
        elif name == 'cloud-config.yaml':
            assert template.keys() <= CLOUDCONFIG_KEYS, template.keys()
        elif isinstance(template, str):  # Not a yaml template
            pass
        else:  # yaml template file
            log.debug("validating template file %s", name)
            assert template.keys() <= PACKAGE_KEYS, template.keys()

    stable_artifacts = []

    # Find all files which contain late bind variables and turn them into a "late bind package"
    # TODO(cmaloney): check there are no late bound variables in cloud-config.yaml
    late_files, regular_files = extract_files_containing_late_variables(
        rendered_templates['dcos-config.yaml']['package'])
    # put the regular files right back
    rendered_templates['dcos-config.yaml'] = {'package': regular_files}

    # Render cluster package list artifact.
    cluster_package_list_filename = 'package_lists/{}.package_list.json'.format(
        argument_dict['cluster_package_list_id'])
    os.makedirs(os.path.dirname(cluster_package_list_filename),
                mode=0o755,
                exist_ok=True)
    write_string(cluster_package_list_filename,
                 argument_dict['cluster_packages'])
    log.info('Cluster package list: {}'.format(cluster_package_list_filename))
    stable_artifacts.append(cluster_package_list_filename)

    def make_package_filename(package_id, extension):
        return 'packages/{0}/{1}{2}'.format(package_id.name, repr(package_id),
                                            extension)

    # Render all the cluster packages
    cluster_package_info = {}

    # Prepare late binding config, if any.
    late_package = build_late_package(late_files, argument_dict['config_id'],
                                      argument_dict['provider'])
    if late_variables:
        # Render the late binding package. This package will be downloaded onto
        # each cluster node during bootstrap and rendered into the final config
        # using the values from the late config file.
        late_package_id = PackageId(late_package['name'])
        late_package_filename = make_package_filename(late_package_id,
                                                      '.dcos_config')
        os.makedirs(os.path.dirname(late_package_filename), mode=0o755)
        write_yaml(late_package_filename, {'package': late_package['package']},
                   default_flow_style=False)
        log.info('Package filename: {}'.format(late_package_filename))
        stable_artifacts.append(late_package_filename)

        # Add the late config file to cloud config. The expressions in
        # late_variables will be resolved by the service handling the cloud
        # config (e.g. Amazon CloudFormation). The rendered late config file
        # on a cluster node's filesystem will contain the final values.
        rendered_templates['cloud-config.yaml']['root'].append({
            'path': '/etc/mesosphere/setup-flags/late-config.yaml',
            'permissions': '0644',
            'owner': 'root',
            # TODO(cmaloney): don't prettyprint to save bytes.
            # NOTE: Use yaml here simply to make avoiding painful escaping and
            # unescaping easier.
            'content': render_yaml({
                'late_bound_package_id': late_package['name'],
                'bound_values': late_variables
            })
        })

    # Collect metadata for cluster packages.
    for package_id_str in json.loads(argument_dict['cluster_packages']):
        package_id = PackageId(package_id_str)
        package_filename = make_package_filename(package_id, '.tar.xz')

        cluster_package_info[package_id.name] = {
            'id': package_id_str,
            'filename': package_filename
        }

    # Render config packages.
    config_package_ids = json.loads(argument_dict['config_package_ids'])
    for package_id_str in config_package_ids:
        package_id = PackageId(package_id_str)
        package_filename = cluster_package_info[package_id.name]['filename']
        do_gen_package(rendered_templates[package_id.name + '.yaml'],
                       cluster_package_info[package_id.name]['filename'])
        stable_artifacts.append(package_filename)

    # Convert cloud-config to just contain write_files rather than root
    cc = rendered_templates['cloud-config.yaml']

    # Shouldn't contain any packages. Providers should pull what they need to
    # late bind out of other packages via cc_package_file.
    assert 'package' not in cc
    cc_root = cc.pop('root', [])
    # Make sure write_files exists.
    assert 'write_files' not in cc
    cc['write_files'] = []
    # Do the transform
    for item in cc_root:
        assert item['path'].startswith('/')
        cc['write_files'].append(item)
    rendered_templates['cloud-config.yaml'] = cc

    # Add in the add_services util. Done here instead of the initial
    # map since we need to bind in parameters
    def add_services(cloudconfig, cloud_init_implementation):
        return add_units(cloudconfig, rendered_templates['dcos-services.yaml'],
                         cloud_init_implementation)

    utils.add_services = add_services

    return Bunch({
        'arguments': argument_dict,
        'cluster_packages': cluster_package_info,
        'stable_artifacts': stable_artifacts,
        'templates': rendered_templates,
        'utils': utils
    })
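The secret handling above is a straightforward masking pass: any variable named in secret_variables has its value replaced with a fixed '**HIDDEN**' marker before being logged or exposed through user_arguments / config_yaml, while the *_full variants keep the real values. In isolation the masking is just a dict comprehension (mask_secrets is an illustrative name, and the sample keys are made up):

MASKED_VALUE = '**HIDDEN**'

def mask_secrets(values, secret_keys):
    return {k: (MASKED_VALUE if k in secret_keys else v) for k, v in values.items()}

masked = mask_secrets(
    {'superuser_password_hash': 'abc123', 'provider': 'aws'},
    {'superuser_password_hash'})
# -> {'superuser_password_hash': '**HIDDEN**', 'provider': 'aws'}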
Example #23
0
File: actions.py Project: alberts/dcos
def _do_bootstrap(install, repository):
    # These files should be set by the environment which initially builds
    # the host (cloud-init).
    repository_url = if_exists(load_string, install.get_config_filename("setup-flags/repository-url"))

    # TODO(cmaloney): If there is 1+ master, grab the active config from a master.
    # If the config can't be grabbed from any of them, fail.
    def fetcher(id, target):
        if repository_url is None:
            raise ValidationError("ERROR: Non-local package {} but no repository url given.".format(repository_url))
        return requests_fetcher(repository_url, id, target, os.getcwd())

    # Copy host/cluster-specific packages written to the filesystem manually
    # from the setup-packages folder into the repository. Do not overwrite or
    # merge existing packages, hard fail instead.
    setup_packages_to_activate = []
    setup_pkg_dir = install.get_config_filename("setup-packages")
    copy_fetcher = partial(_copy_fetcher, setup_pkg_dir)
    if os.path.exists(setup_pkg_dir):
        for pkg_id_str in os.listdir(setup_pkg_dir):
            print("Installing setup package: {}".format(pkg_id_str))
            if not PackageId.is_id(pkg_id_str):
                raise ValidationError("Invalid package id in setup package: {}".format(pkg_id_str))
            pkg_id = PackageId(pkg_id_str)
            if pkg_id.version != "setup":
                raise ValidationError(
                    "Setup packages (those in `{0}`) must have the version setup. "
                    "Bad package: {1}".format(setup_pkg_dir, pkg_id_str))

            # Make sure there is no existing package
            if repository.has_package(pkg_id_str):
                print("WARNING: Ignoring already installed package {}".format(pkg_id_str))

            repository.add(copy_fetcher, pkg_id_str)
            setup_packages_to_activate.append(pkg_id_str)

    # If active.json is set on the host, use that as the set of packages to
    # activate. Otherwise just use the set of currently active packages (those
    # active in the bootstrap tarball)
    to_activate = None
    active_path = install.get_config_filename("setup-flags/active.json")
    if os.path.exists(active_path):
        print("Loaded active packages from", active_path)
        to_activate = load_json(active_path)

        # Ensure all packages are local
        print("Ensuring all packages in active set {} are local".format(",".join(to_activate)))
        for package in to_activate:
            repository.add(fetcher, package)
    else:
        print("Calculated active packages from bootstrap tarball")
        to_activate = list(install.get_active())

        # Fetch and activate all requested additional packages to accompany the bootstrap packages.
        cluster_packages_filename = install.get_config_filename("setup-flags/cluster-packages.json")
        cluster_packages = if_exists(load_json, cluster_packages_filename)
        print("Checking for cluster packages in:", cluster_packages_filename)
        if cluster_packages:
            if not isinstance(cluster_packages, list):
                print('ERROR: {} should contain a JSON list of packages. Got a {}'.format(cluster_packages_filename,
                                                                                          type(cluster_packages)))
            print("Loading cluster-packages: {}".format(cluster_packages))

            for package_id_str in cluster_packages:
                # Validate the package ids
                pkg_id = PackageId(package_id_str)

                # Fetch the packages if not local
                if not repository.has_package(package_id_str):
                    repository.add(fetcher, package_id_str)

                # Add the package to the set to activate
                setup_packages_to_activate.append(package_id_str)
        else:
            print("No cluster-packages specified")

    # Calculate the full set of final packages (Explicit activations + setup packages).
    # De-duplicate using a set.
    to_activate = list(set(to_activate + setup_packages_to_activate))

    print("Activating packages")
    install.activate(repository.load_packages(to_activate))
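_do_bootstrap reads its optional inputs (repository-url, cluster-packages.json) through an if_exists(fn, path)-style helper, so a missing file simply yields None and the code falls back to its defaults. A sketch of how such a helper presumably behaves (assumed, not pkgpanda's actual definition):

import os

def if_exists(fn, path, *args, **kwargs):
    # Apply fn only when the file is actually present; a missing file is
    # reported as None rather than an exception. (Assumed behavior.)
    if not os.path.exists(path):
        return None
    return fn(path, *args, **kwargs)

# e.g. repository_url = if_exists(load_string, install.get_config_filename("setup-flags/repository-url"))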