Example #1
    def preflight(self):
        runtime_deps = list(self.dependencies(Scope.RUN, recurse=False))
        if runtime_deps:
            raise ElementError(
                "{}: Only build type dependencies supported by flatpak_image elements"
                .format(self))

        sources = list(self.sources())
        if sources:
            raise ElementError(
                "{}: flatpak_image elements may not have sources".format(self))
Example #2
    def preflight(self):
        runtime_deps = list(self.dependencies(Scope.RUN, recurse=False))
        if runtime_deps:
            raise ElementError("{}: Only build type dependencies supported by collect-integration elements"
                               .format(self))

        sources = list(self.sources())
        if sources:
            raise ElementError("{}: collect-integration elements may not have sources".format(self))

        for ignore in self.ignore:
            if self.search(Scope.BUILD, ignore) is None:
                raise ElementError("{}: element {} is not in dependencies".format(self, ignore))
Example #3
    def _packages_list(self):
        input_elm = self.search(Scope.BUILD, self.__input)
        if not input_elm:
            detail = ("Available elements are {}".format("\n".join(
                [x.name for x in self.dependencies(Scope.BUILD)])))
            raise ElementError("{} Failed to find element {}".format(
                self.name, self.__input),
                               detail=detail)

        bstdata = input_elm.get_public_data("bst")
        if "dpkg-data" not in bstdata:
            raise ElementError(
                "{}: Can't get package list for {}, no bst.dpkg-data".format(
                    self.name, self.__input))
        return " ".join([k for k, v in self.node_items(bstdata["dpkg-data"])])
Example #4
    def extract_cpe(self, dep):
        cpe = dep.get_public_data("cpe")

        sources = list(dep.sources())

        if cpe is None:
            cpe = {}
        else:
            cpe = cpe.strip_node_info()

        if "product" not in cpe:
            cpe["product"] = os.path.basename(os.path.splitext(dep.name)[0])

        version_match = cpe.pop("version-match", None)

        if "version" not in cpe:
            matcher = VersionMatcher(version_match)
            version = matcher.get_version(sources)
            self.info("{} version {}".format(
                dep,
                version,
            ))

            if version is None:
                if version_match is None:
                    self.status("Missing version to {}.".format(dep))
                else:
                    fmt = '{}: {}: version match string "{}" did not match anything.'
                    msg = fmt.format(self, dep, version_match)
                    raise ElementError(msg)

            if version:
                cpe["version"] = version

        return cpe
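
The VersionMatcher used above is not included in these excerpts. A minimal sketch of what such a helper might look like, assuming get_version() scans source refs for a version string (the class name and call signature are taken from the call site above; the ref attribute and the regex fallback are assumptions):

    import re

    class VersionMatcher:
        def __init__(self, pattern=None):
            # `pattern` is the optional "version-match" regex, if one was given
            self.pattern = re.compile(pattern) if pattern else None

        def get_version(self, sources):
            # Scan each source ref for a version-looking string
            for source in sources:
                ref = getattr(source, "ref", None) or ""
                if self.pattern:
                    match = self.pattern.search(ref)
                else:
                    match = re.search(r"\d+(\.\d+)+", ref)
                if match:
                    return match.group(1) if match.groups() else match.group(0)
            return None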
Example #5
    def configure_dependencies(self, dependencies):

        self.__input = None

        for dep in dependencies:

            # Determine the location to stage each element, default is "/"
            input_element = False
            if dep.config:
                dep.config.validate_keys(["input"])
                input_element = dep.config.get_bool("input", False)

            # Add each element to the layout
            if input_element:
                self.layout_add(dep.element, dep.path,
                                self.get_variable("build-root"))

                # Hold on to the input element
                self.__input = dep.element
            else:
                self.layout_add(dep.element, dep.path, "/")

        if self.__input is None:
            raise ElementError(
                "{}: No dependency specified as the input element".format(
                    self))
Example #6
    def extract_cpe(self, dep):
        cpe = dep.get_public_data('cpe')

        sources = list(dep.sources())

        if cpe is None:
            cpe = {}

        if 'product' not in cpe:
            cpe['product'] = os.path.basename(os.path.splitext(dep.name)[0])
        version_match = cpe.pop('version-match', None)

        if 'version' not in cpe:
            matcher = VersionMatcher(version_match)
            version = matcher.get_version(sources)
            self.info("{} version {}".format(dep, version, ))

            if version is None:
                if version_match is None:
                    self.status('Missing version for {}.'.format(dep))
                else:
                    raise ElementError('{}: {}: version match string "{}" did not match anything.'.format(self, dep, version_match))

            if version:
                cpe['version'] = version

        return cpe
Example #7
    def configure(self, node):

        node.validate_keys([
            "path", "options", "cache-junction-elements",
            "ignore-junction-remotes", "overrides"
        ])

        self.path = node.get_str("path", default="")
        self.options = node.get_mapping("options", default={})
        self.cache_junction_elements = node.get_bool("cache-junction-elements",
                                                     default=False)
        self.ignore_junction_remotes = node.get_bool("ignore-junction-remotes",
                                                     default=False)

        # The overrides dictionary has the target junction
        # to override as a key, and the ScalarNode of the
        # junction name as a value
        self.overrides = {}
        overrides_node = node.get_mapping("overrides", {})
        for key, junction_name in overrides_node.items():

            # Cannot override a subproject with the project itself
            #
            if junction_name.as_str() == self.name:
                raise ElementError(
                    "{}: Attempt to override subproject junction '{}' with the overriding junction '{}' itself"
                    .format(junction_name.get_provenance(), key,
                            junction_name.as_str()),
                    reason="override-junction-with-self",
                )
            self.overrides[key] = junction_name
Example #8
    def preflight(self):
        sources = list(self.sources())
        if sources:
            raise ElementError(
                "{}: collect-integration elements may not have sources".format(
                    self
                )
            )

        for ignore in self.ignore:
            if self.search(ignore) is None:
                raise ElementError(
                    "{}: element {} is not in dependencies".format(
                        self, ignore
                    )
                )
Example #9
    def assemble(self, sandbox):

        # Stage sources into the input directory
        self.stage_sources(sandbox, "input")

        rootdir = sandbox.get_virtual_directory()
        inputdir = rootdir.open_directory("input")
        outputdir = rootdir.open_directory("output", create=True)

        # The directory to grab
        inputdir = inputdir.open_directory(self.source.strip(os.sep))

        # The output target directory
        outputdir = outputdir.open_directory(self.target.strip(os.sep),
                                             create=True)

        if not inputdir:
            raise ElementError(
                "{}: No files were found inside directory '{}'".format(
                    self, self.source))

        # Move it over
        outputdir.import_files(inputdir)

        # And we're done
        return "/output"
Example #10
    def run_command(*command):
        exitcode = sandbox.run(command, SandboxFlags.ROOT_READ_ONLY)
        if exitcode != 0:
            raise ElementError(
                "Command '{}' failed with exitcode {}".format(
                    " ".join(command), exitcode
                )
            )
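
This helper is presumably nested inside an element's assemble() so that it can close over sandbox; a call might then look like the following (the command itself is purely illustrative):

        run_command("flatpak", "build-update-repo", "/buildstream/repo")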
Example #11
    def preflight(self):
        # Assert that we have at least one source to fetch.

        sources = list(self.sources())
        if not sources:
            raise ElementError(
                "{}: An import element must have at least one source.".format(
                    self))
Example #12
    def configure(self, node):
        node.validate_keys(["filename", "compression"])
        self.filename = self.node_subst_vars(node.get_scalar("filename"))
        self.compression = node.get_str("compression")

        if self.compression not in ["none", "gzip", "xz", "bzip2"]:
            raise ElementError("{}: Invalid compression option {}".format(
                self, self.compression))
Example #13
    def configure(self, node):
        self.node_validate(node, ['filename', 'compression'])
        self.filename = self.node_subst_member(node, 'filename')
        self.compression = self.node_get_member(node, str, 'compression')

        if self.compression not in ['none', 'gzip', 'xz', 'bzip2']:
            raise ElementError("{}: Invalid compression option {}".format(
                self, self.compression))
Example #14
    def _packages_list(self):

        bstdata = self.__input.get_public_data("bst")
        if "dpkg-data" not in bstdata:
            raise ElementError(
                "{}: Can't get package list for {}, no bst.dpkg-data".format(
                    self, self.__input.name))

        dpkg_data = bstdata.get_mapping("dpkg-data", {})
        return " ".join(dpkg_data.keys())
Example #15
    def configure_dependencies(self, dependencies):

        self._flatpaks = []

        for dep in dependencies:
            flatpak_image_dep = False
            flatpak_stack_dep = False

            if dep.config:
                dep.config.validate_keys(["flatpak-image", "flatpak-stack"])
                flatpak_image_dep = dep.config.get_bool("flatpak-image", False)
                flatpak_stack_dep = dep.config.get_bool("flatpak-stack", False)

                if flatpak_image_dep and flatpak_stack_dep:
                    raise ElementError(
                        "{}: Dependency specified as both a flatpak image and a stack"
                        .format(dep.config.get_provenance()))

            if flatpak_image_dep:
                self._layout_flatpak(dep.element, dep.path)
            elif flatpak_stack_dep:
                for flatpak_image in dep.element.dependencies(recurse=False):
                    self._layout_flatpak(flatpak_image,
                                         dep.path,
                                         is_stack=True)
            else:
                self.layout_add(dep.element, dep.path, "/")

        if not self._flatpaks:
            raise ElementError(
                "{}: No flatpak images specified for this repo".format(self))

        # Add these commands after laying out the flatpaks, which also adds commands.
        #
        for src, dest in self._copy_refs:
            self.add_commands(
                "copy ref {} -> {}".format(src, dest),
                [
                    "flatpak build-commit-from --src-ref={} /buildstream/repo {}"
                    .format(src, dest)
                ],
            )
Example #16
    def preflight(self):
        # Exactly one build-depend is permitted
        build_deps = list(self.dependencies(Scope.BUILD, recurse=False))
        if len(build_deps) != 1:
            detail = "Full list of build-depends:\n"
            deps_list = "  \n".join([x.name for x in build_deps])
            detail += deps_list
            raise ElementError(
                "{}: {} element must have exactly 1 build-dependency, actually have {}"
                .format(self,
                        type(self).__name__, len(build_deps)),
                detail=detail,
                reason="filter-bdepend-wrong-count",
            )

        # That build-depend must not also be a runtime-depend
        runtime_deps = list(self.dependencies(Scope.RUN, recurse=False))
        if build_deps[0] in runtime_deps:
            detail = "Full list of runtime depends:\n"
            deps_list = "  \n".join([x.name for x in runtime_deps])
            detail += deps_list
            raise ElementError(
                "{}: {} element's build dependency must not also be a runtime dependency"
                .format(self,
                        type(self).__name__),
                detail=detail,
                reason="filter-bdepend-also-rdepend",
            )

        # If a parent does not produce an artifact, fail and inform user that the dependency
        # must produce artifacts
        if not build_deps[0].BST_ELEMENT_HAS_ARTIFACT:
            detail = "{} does not produce an artifact, so there is nothing to filter".format(
                build_deps[0].name)
            raise ElementError(
                "{}: {} element's build dependency must produce an artifact".
                format(self,
                       type(self).__name__),
                detail=detail,
                reason="filter-bdepend-no-artifact",
            )
Example #17
    def configure(self, node):
        node.validate_keys(["build-commands"])

        self.unedited_cmds = {}
        if "build-commands" not in node:
            raise ElementError(
                "{}: Unexpectedly missing command: 'build-commands'".format(
                    self))
        self.unedited_cmds["build-commands"] = node.get_str_list(
            "build-commands")
        self.set_work_dir()
        self.set_install_root()
        self.set_root_read_only(True)
Example #18
    def assemble(self, sandbox):
        self.stage_sources(sandbox, 'input')

        basedir = sandbox.get_directory()
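        # Directory layout: sources are staged under buildstream/allfiles,
        # and the finished flatpak image is assembled under
        # buildstream/install with files/, an optional export/ tree and a
        # metadata file.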
        allfiles = os.path.join(basedir, 'buildstream', 'allfiles')
        reldirectory = os.path.relpath(self.directory, '/')
        subdir = os.path.join(allfiles, reldirectory)
        exportsrcdir = os.path.join(allfiles, self.export)
        etcdir = os.path.join(allfiles, 'etc')
        installdir = os.path.join(basedir, 'buildstream', 'install')
        filesdir = os.path.join(installdir, 'files')
        exportdir = os.path.join(installdir, 'export')
        filesetcdir = os.path.join(filesdir, 'etc')
        stagedir = os.path.join(os.sep, 'buildstream', 'allfiles')

        os.makedirs(allfiles, exist_ok=True)
        os.makedirs(filesdir, exist_ok=True)
        if self.metadata.has_section('Application'):
            os.makedirs(exportdir, exist_ok=True)

        for section in self.metadata.sections():
            if section.startswith('Extension '):
                try:
                    extensiondir = self.metadata.get(section, 'directory')
                    os.makedirs(os.path.join(installdir, 'files',
                                             extensiondir),
                                exist_ok=True)
                except PermissionError as e:
                    raise ElementError(
                        "Permission denied: Cannot create {}".format(
                            extensiondir)) from e

        with self.timed_activity("Creating flatpak image", silent_nested=True):
            self.stage_dependency_artifacts(sandbox,
                                            Scope.BUILD,
                                            path=stagedir,
                                            include=self.include,
                                            exclude=self.exclude)
            utils.link_files(subdir, filesdir)
            if os.path.exists(etcdir):
                utils.link_files(etcdir, filesetcdir)

            if os.path.isdir(exportsrcdir):
                utils.link_files(exportsrcdir, exportdir)
            elif self.metadata.has_section('Application'):
                os.makedirs(exportdir, exist_ok=True)

        metadatafile = os.path.join(installdir, 'metadata')
        with open(metadatafile, "w") as m:
            self.metadata.write(m)
        return os.path.join(os.sep, 'buildstream', 'install')
Example #19
    def configure(self, node):
        command_steps = ["create_dev_proc_shadow", "create_img", "install_img"]
        node.validate_keys(command_steps)

        for step in command_steps:
            if step not in node:
                raise ElementError(
                    "{}: Unexpectedly missing command step '{}'".format(
                        self, step))
            cmds = node.get_str_list(step)
            self.add_commands(step, cmds)

        self.set_work_dir()
        self.set_install_root()
        self.set_root_read_only(True)
Example #20
    def _layout_flatpaks(self, elements):
        def staging_dir(elt):
            return '/buildstream/input/{}'.format(elt.name)

        def export_command(elt):
            return 'flatpak build-export --files=files --arch={} /buildstream/repo {} {}'\
                .format(self._arch, staging_dir(elt), self._branch)

        for elt in elements:
            if elt.get_kind() == 'flatpak_image':
                self.layout_add(elt.name, staging_dir(elt))
                self.add_commands('export {}'.format(elt.name), [export_command(elt)])
            elif elt.get_kind() == 'stack':
                self._layout_flatpaks(elt.dependencies(Scope.RUN, recurse=False))
            else:
                raise ElementError('Dependency {} is not of kind flatpak_image or stack'.format(elt.name))
Example #21
    def configure(self, node):
        self.node_validate(node, ['build-commands', 'base', 'input'])

        self.__input = self.node_subst_member(node, 'input')
        self.layout_add(self.node_subst_member(node, 'base'), "/")
        self.layout_add(None, '/buildstream')
        self.layout_add(self.__input, self.get_variable('build-root'))
        self.unedited_cmds = {}
        if 'build-commands' not in node:
            raise ElementError(
                "{}: Unexpectedly missing command: 'build-commands'".format(
                    self))
        cmds = self.node_subst_list(node, 'build-commands')
        self.unedited_cmds['build-commands'] = cmds

        self.set_work_dir()
        self.set_install_root()
        self.set_root_read_only(True)
Example #22
    def preflight(self):

        # Assert that all dependencies are both build and runtime dependencies.
        #
        all_deps = list(self._dependencies(_Scope.ALL, recurse=False))
        run_deps = list(self._dependencies(_Scope.RUN, recurse=False))
        build_deps = list(self._dependencies(_Scope.BUILD, recurse=False))
        if any(dep not in run_deps
               for dep in all_deps) or any(dep not in build_deps
                                           for dep in all_deps):
            # There is no need to specify the `self` provenance here in preflight() errors, as the base class
            # will take care of prefixing these for plugin author convenience.
            raise ElementError(
                "All dependencies of 'stack' elements must be both build and runtime dependencies",
                detail=
                "Make sure you declare all dependencies in the `depends` list, without specifying any `type`.",
                reason="stack-requires-build-and-run",
            )
Example #23
    def configure(self, node):
        self.node_validate(node, ["dependency_scope", "path"])
        self.path = self.node_subst_member(node, "path")

        dependency_scope = self.node_subst_member(node,
                                                  "dependency_scope").lower()
        if dependency_scope == "run":
            self.dep_scope = Scope.RUN
        elif dependency_scope == "build":
            self.dep_scope = Scope.BUILD
        elif dependency_scope == "all":
            self.dep_scope = Scope.ALL
        elif dependency_scope == "none":
            self.dep_scope = None
        else:
            raise ElementError(
                f"Incorrect value supplied for depstype: {dependency_scope}"
                "\nAcceptable values: run, build, all")
Example #24
    def configure_dependencies(self, dependencies):
        have_input = False
        for dep in dependencies:
            input_dep = False
            # Separate base dependencies from input dependencies
            if dep.config:
                dep.config.validate_keys(["input"])
                input_dep = dep.config.get_bool("input", False)

            if input_dep:
                have_input = True
                self.layout_add(dep.element, dep.path,
                                self.get_variable("build-root"))
            else:
                self.layout_add(dep.element, dep.path, "/")

        if not have_input:
            raise ElementError(
                "{}: No 'input' dependency specified".format(self))
Example #25
    def assemble(self, sandbox):
        with self.timed_activity("Staging artifact", silent_nested=True):
            for dep in self.dependencies(Scope.BUILD, recurse=False):
                # Check that all the included/excluded domains exist
                pub_data = dep.get_public_data("bst")
                split_rules = pub_data.get_mapping("split-rules", {})
                unfound_includes = []
                for domain in self.include:
                    if domain not in split_rules:
                        unfound_includes.append(domain)
                unfound_excludes = []
                for domain in self.exclude:
                    if domain not in split_rules:
                        unfound_excludes.append(domain)

                detail = []
                if unfound_includes:
                    detail.append("Unknown domains were used in {}".format(
                        self.include_node.get_provenance()))
                    detail.extend([
                        " - {}".format(domain) for domain in unfound_includes
                    ])

                if unfound_excludes:
                    detail.append("Unknown domains were used in {}".format(
                        self.exclude_node.get_provenance()))
                    detail.extend([
                        " - {}".format(domain) for domain in unfound_excludes
                    ])

                if detail:
                    detail = "\n".join(detail)
                    raise ElementError("Unknown domains declared.",
                                       detail=detail)

                dep.stage_artifact(sandbox,
                                   include=self.include,
                                   exclude=self.exclude,
                                   orphans=self.include_orphans)
        return ""
Example #26
    def _build_image(self, sandbox, image, root, output):
        parent = os.path.join(root, 'parent')
        parent_checkout = os.path.join(root, 'parent_checkout')

        if 'layer' in image:
            if os.path.exists(parent_checkout):
                shutil.rmtree(parent_checkout)
            os.makedirs(os.path.join(parent_checkout))

        layer_descs = []
        layer_files = []
        diff_ids = []
        history = None
        legacy_parent = None

        config = {}
        if 'author' in image:
            config['author'] = image['author']
        config['architecture'] = image['architecture']
        config['os'] = image['os']
        if 'config' in image:
            config['config'] = {}
            for k, v in image['config'].items():
                if k in ['ExposedPorts', 'Volumes']:
                    config['config'][k] = {}
                    for value in v:
                        config['config'][k][value] = {}
                else:
                    config['config'][k] = v

        if 'parent' in image:
            if os.path.exists(parent):
                shutil.rmtree(parent)
            parent_dep = self.search(Scope.BUILD, image['parent']['element'])
            if not parent_dep:
                raise ElementError(
                    '{}: Element not in dependencies: {}'.format(
                        self, image['parent']['element']))

            parent_dep.stage_dependency_artifacts(sandbox,
                                                  Scope.RUN,
                                                  path='parent')
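            # The staged parent may be in docker-archive layout (a top-level
            # manifest.json) or in OCI layout (an index.json); both formats
            # are handled below.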
            if not os.path.exists(os.path.join(parent, 'index.json')):
                with open(os.path.join(parent, 'manifest.json'),
                          'r',
                          encoding='utf-8') as f:
                    parent_index = json.load(f)
                parent_image = parent_index[image['parent']['image']]
                layers = parent_image['Layers']

                with open(os.path.join(parent,
                                       safe_path(parent_image['Config'])),
                          'r',
                          encoding='utf-8') as f:
                    image_config = json.load(f)
                diff_ids = image_config['rootfs']['diff_ids']

                if 'history' in image_config:
                    history = image_config['history']

                for i, layer in enumerate(layers):
                    _, diff_id = diff_ids[i].split(':', 1)
                    with open(os.path.join(parent, safe_path(layer)),
                              'rb') as origblob:
                        if self.gzip:
                            targz_blob = blob(
                                output,
                                media_type=
                                'application/vnd.oci.image.layer.v1.tar+gzip',
                                mode=self.mode)
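                            # The fixed mtime keeps the gzip header, and thus
                            # the compressed blob, reproducible across builds.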
                            with targz_blob.create() as gzipfile:
                                with gzip.GzipFile(filename=diff_id,
                                                   fileobj=gzipfile,
                                                   mode='wb',
                                                   mtime=1320937200) as gz:
                                    shutil.copyfileobj(origblob, gz)
                            layer_descs.append(targz_blob.descriptor)
                            layer_files.append(targz_blob.filename)
                            legacy_parent = targz_blob.legacy_id
                        else:
                            legacy_config = {'os': image['os']}
                            if legacy_parent:
                                legacy_config['parent'] = legacy_parent
                            tar_blob = blob(
                                output,
                                media_type=
                                'application/vnd.oci.image.layer.v1.tar',
                                mode=self.mode)
                            with tar_blob.create() as newfile:
                                shutil.copyfileobj(origblob, newfile)
                            layer_descs.append(tar_blob.descriptor)
                            layer_files.append(tar_blob.filename)
                            legacy_parent = tar_blob.legacy_id
            else:
                with open(os.path.join(parent, 'index.json'),
                          'r',
                          encoding='utf-8') as f:
                    parent_index = json.load(f)
                parent_image_desc = \
                    parent_index['manifests'][image['parent']['image']]
                algo, h = parent_image_desc['digest'].split(':', 1)
                with open(os.path.join(parent, 'blobs', safe_path(algo),
                                       safe_path(h)),
                          'r',
                          encoding='utf-8') as f:
                    image_manifest = json.load(f)
                algo, h = image_manifest['config']['digest'].split(':', 1)
                with open(os.path.join(parent, 'blobs', safe_path(algo),
                                       safe_path(h)),
                          'r',
                          encoding='utf-8') as f:
                    image_config = json.load(f)
                diff_ids = image_config['rootfs']['diff_ids']
                if 'history' in image_config:
                    history = image_config['history']
                for i, layer in enumerate(image_manifest['layers']):
                    _, diff_id = diff_ids[i].split(':', 1)
                    algo, h = layer['digest'].split(':', 1)
                    origfile = os.path.join(parent, 'blobs', safe_path(algo),
                                            safe_path(h))
                    with ExitStack() as e:
                        if 'layer' not in image and i + 1 == len(
                                image_manifest['layers']):
                            # The case where we do not add a layer: the last imported layer has to be fully reconfigured
                            legacy_config = {}
                            legacy_config.update(config)
                            if legacy_parent:
                                legacy_config['parent'] = legacy_parent
                        else:
                            legacy_config = {'os': image['os']}
                        if legacy_parent:
                            legacy_config['parent'] = legacy_parent
                        if self.gzip:
                            output_blob = blob(
                                output,
                                media_type=
                                'application/vnd.oci.image.layer.v1.tar+gzip',
                                mode=self.mode)
                        else:
                            output_blob = blob(
                                output,
                                media_type=
                                'application/vnd.oci.image.layer.v1.tar',
                                mode=self.mode,
                                legacy_config=legacy_config)
                        outp = e.enter_context(output_blob.create())
                        inp = e.enter_context(open(origfile, 'rb'))
                        if layer['mediaType'].endswith('+gzip'):
                            if self.gzip:
                                shutil.copyfileobj(inp, outp)
                            else:
                                gz = e.enter_context(
                                    gzip.open(filename=inp, mode='rb'))
                                shutil.copyfileobj(gz, outp)
                        else:
                            if self.gzip:
                                gz = e.enter_context(
                                    gzip.GzipFile(filename=diff_id,
                                                  fileobj=outp,
                                                  mode='wb',
                                                  mtime=1320937200))
                                shutil.copyfileobj(inp, gz)
                            else:
                                shutil.copyfileobj(inp, outp)

                    layer_descs.append(output_blob.descriptor)
                    layer_files.append(output_blob.filename)
                    legacy_parent = output_blob.legacy_id

        if 'parent' in image and 'layer' in image:
            unpacked = False
            if isinstance(parent_dep, OciElement):
                # Here we read the parent configuration to checkout
                # the artifact which is much faster than unpacking the tar
                # files.
                layers = []
                parent_image = image['parent']['image']
                for layer in parent_dep.images[parent_image]['layer']:
                    layer_dep = parent_dep.search(Scope.BUILD, layer)
                    if not layer_dep:
                        raise ElementError(
                            '{}: Element not in dependencies: {}'.format(
                                parent_dep, layer))

                    # We need to verify dependencies. If not in current
                    # element's dependencies, then we cannot safely assume
                    # it is cached. Parent could be cached while its
                    # dependencies either removed or not pulled.
                    if layer_dep != self.search(Scope.BUILD, layer):
                        self.warn(
                            'In order to optimize building of {}, you should add {} as a build dependency'
                            .format(self.name, layer))
                        layers = None
                        break
                    else:
                        layers.append(layer_dep)
                if layers is not None:
                    with self.timed_activity(
                            'Checking out layer from {}'.format(
                                parent_dep.name)):
                        for layer_dep in layers:
                            layer_dep.stage_dependency_artifacts(
                                sandbox, Scope.RUN, path='parent_checkout')
                        unpacked = True

            if not unpacked:
                for layer in layer_files:
                    if self.gzip:
                        mode = 'r:gz'
                    else:
                        mode = 'r:'
                    with self.timed_activity(
                            'Decompressing layer {}'.format(layer)):
                        with tarfile.open(layer, mode=mode) as t:
                            members = []
                            for info in t.getmembers():
                                if '/../' in info.name:
                                    continue
                                if info.name.startswith('../'):
                                    continue

                                dirname, basename = os.path.split(info.name)
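                                # OCI whiteouts: '.wh..wh..opq' clears the
                                # directory's existing contents, while a
                                # '.wh.<name>' entry deletes a single path.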
                                if basename == '.wh..wh..opq':
                                    for entry in os.listdir(
                                            os.path.join(
                                                parent_checkout, dirname)):
                                        full_entry = os.path.join(
                                            parent_checkout, dirname, entry)
                                        if os.path.islink(
                                                full_entry
                                        ) or not os.path.isdir(full_entry):
                                            os.unlink(full_entry)
                                        else:
                                            shutil.rmtree(full_entry)
                                elif basename.startswith('.wh.'):
                                    full_entry = os.path.join(
                                        parent_checkout, dirname, basename[4:])
                                    if os.path.islink(
                                            full_entry
                                    ) or not os.path.isdir(full_entry):
                                        os.unlink(full_entry)
                                    else:
                                        shutil.rmtree(full_entry)
                                else:
                                    members.append(info)

                            t.extractall(path=parent_checkout, members=members)

        legacy_config = {}
        legacy_config.update(config)
        if legacy_parent:
            legacy_config['parent'] = legacy_parent

        if 'layer' in image:
            for name in image['layer']:
                dep = self.search(Scope.BUILD, name)
                dep.stage_dependency_artifacts(sandbox,
                                               Scope.RUN,
                                               path='layer')

            layer = os.path.join(root, 'layer')
            with self.timed_activity('Transforming into layer'):
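                # For every path present in the parent but absent from the
                # new layer, emit an empty '.wh.<name>' whiteout file so the
                # entry is deleted when the layer is applied.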
                for root, dirs, files in os.walk(parent_checkout):
                    for f in itertools.chain(files, dirs):
                        rel = os.path.relpath(os.path.join(root, f),
                                              parent_checkout)
                        if not os.path.lexists(os.path.join(layer, rel)) \
                           and os.path.lexists(os.path.dirname(os.path.join(layer, rel))):
                            whfile = os.path.join(
                                layer, os.path.relpath(root, parent_checkout),
                                '.wh.' + f)
                            with open(whfile, 'w'):
                                pass

                if 'parent' in image:
                    for root, dirs, files in os.walk(layer):
                        for f in files:
                            new = os.path.join(root, f)
                            rel = os.path.relpath(os.path.join(root, f), layer)
                            old = os.path.join(parent_checkout, rel)
                            if os.path.lexists(old):
                                old_st = os.lstat(old)
                                new_st = os.lstat(new)
                                if old_st.st_mode != new_st.st_mode:
                                    continue
                                if int(old_st.st_mtime) != int(
                                        new_st.st_mtime):
                                    continue
                                if stat.S_ISLNK(old_st.st_mode):
                                    if os.readlink(old) == os.readlink(new):
                                        os.unlink(new)
                                else:
                                    if filecmp.cmp(new, old):
                                        os.unlink(new)

            with tempfile.TemporaryFile(mode='w+b') as tfile:
                with tarfile.open(fileobj=tfile, mode='w:') as t:
                    with self.timed_activity('Building layer tar'):
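                        # Entries are sorted and uid/gid forced to 0 so the
                        # layer tarball is reproducible across builds.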
                        for root, dirs, files in os.walk(layer):
                            dirs.sort()
                            for f in itertools.chain(sorted(files), dirs):
                                path = os.path.join(root, f)
                                arcname = os.path.relpath(path, layer)
                                st = os.lstat(path)
                                tinfo = tarfile.TarInfo(name=arcname)
                                tinfo.uid = 0
                                tinfo.gid = 0
                                tinfo.mode = stat.S_IMODE(st.st_mode)
                                tinfo.mtime = st.st_mtime
                                if stat.S_ISDIR(st.st_mode):
                                    tinfo.type = tarfile.DIRTYPE
                                    t.addfile(tinfo, None)
                                elif stat.S_ISREG(st.st_mode):
                                    tinfo.type = tarfile.REGTYPE
                                    tinfo.size = st.st_size
                                    with open(path, 'rb') as fd:
                                        t.addfile(tinfo, fd)
                                elif stat.S_ISLNK(st.st_mode):
                                    tinfo.type = tarfile.SYMTYPE
                                    tinfo.linkname = os.readlink(path)
                                    t.addfile(tinfo, None)
                                else:
                                    raise ElementError(
                                        '{}: Unexpected file type for: {}'.
                                        format(self, arcname))
                tfile.seek(0)
                tar_hash = hashlib.sha256()
                with self.timed_activity('Hashing layer'):
                    while True:
                        data = tfile.read(16 * 1024)
                        if len(data) == 0:
                            break
                        tar_hash.update(data)
                tfile.seek(0)
                if self.gzip:
                    targz_blob = blob(
                        output,
                        media_type=
                        'application/vnd.oci.image.layer.v1.tar+gzip',
                        mode=self.mode)
                    with self.timed_activity('Compressing layer'):
                        with targz_blob.create() as gzipfile:
                            with gzip.GzipFile(filename=tar_hash.hexdigest(),
                                               fileobj=gzipfile,
                                               mode='wb',
                                               mtime=1320937200) as gz:
                                shutil.copyfileobj(tfile, gz)
                    layer_descs.append(targz_blob.descriptor)
                else:
                    copied_blob = blob(
                        output,
                        media_type='application/vnd.oci.image.layer.v1.tar',
                        mode=self.mode,
                        legacy_config=legacy_config)
                    with copied_blob.create() as copiedfile:
                        shutil.copyfileobj(tfile, copiedfile)
                    layer_descs.append(copied_blob.descriptor)
                    legacy_parent = copied_blob.legacy_id

            diff_ids.append('sha256:{}'.format(tar_hash.hexdigest()))

        if not history:
            history = []
        hist_entry = {}
        if 'layer' not in image:
            hist_entry['empty_layer'] = True
        if 'author' in image:
            hist_entry['author'] = image['author']
        if 'comment' in image:
            hist_entry['comment'] = image['comment']
        history.append(hist_entry)

        config['rootfs'] = {'type': 'layers', 'diff_ids': diff_ids}
        config['history'] = history
        config_blob = blob(
            output,
            media_type='application/vnd.oci.image.config.v1+json',
            text=True,
            mode=self.mode)
        with config_blob.create() as configfile:
            json.dump(config, configfile)

        if self.mode == 'docker':
            manifest = {
                'Config': config_blob.descriptor,
                'Layers': layer_descs
            }
            legacy_repositories = {}
            if 'tags' in image:
                manifest['RepoTags'] = image['tags']
                for tag in image['tags']:
                    name, version = tag.split(':', 1)
                    if name not in legacy_repositories:
                        legacy_repositories[name] = {}
                    legacy_repositories[name][version] = legacy_parent

            return manifest, legacy_repositories
        else:
            manifest = {'schemaVersion': 2}
            manifest['layers'] = layer_descs
            manifest['config'] = config_blob.descriptor
            if 'annotations' in image:
                manifest['annotations'] = image['annotations']
            manifest_blob = blob(
                output,
                media_type='application/vnd.oci.image.manifest.v1+json',
                text=True)
            with manifest_blob.create() as manifestfile:
                json.dump(manifest, manifestfile)
            platform = {
                'os': image['os'],
                'architecture': image['architecture']
            }
            if 'os.version' in image:
                platform['os.version'] = image['os.version']
            if 'os.features' in image:
                platform['os.features'] = image['os.features']
            if 'variant' in image:
                platform['variant'] = image['variant']
            manifest_blob.descriptor['platform'] = platform
            return manifest_blob.descriptor, {}
Example #27
    def assemble(self, sandbox):
        # Replace <PACKAGES> if no variable was set
        packages = self._get_packages(sandbox)
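        # Note: this reaches through Python name mangling to rewrite
        # BuildElement's private __commands mapping in place.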
        self._BuildElement__commands = dict([
            (group,
             [c.replace("<PACKAGES>", " ".join(packages)) for c in commands])
            for group, commands in self._BuildElement__commands.items()
        ])

        collectdir = super().assemble(sandbox)

        bad_overlaps = set()
        new_split_rules = {}
        new_dpkg_data = {}
        new_package_scripts = {}
        for package in packages:
            if not self._get_workspace():  # If we're not using a workspace
                package_path = os.path.join(
                    sandbox.get_directory(),
                    self.get_variable('build-root').lstrip(os.sep), 'debian',
                    package)
            else:  # We have a workspace open for this dpkg_build element
                workspace = self._get_workspace()
                package_path = os.path.join(workspace.get_absolute_path(),
                                            'debian', package)

            # Exclude DEBIAN files because they're pulled in as public metadata
            contents = [
                '/' + x for x in utils.list_relative_paths(package_path)
                if x != "." and not x.startswith("DEBIAN")
            ]
            new_split_rules[package] = contents

            # Check for any overlapping files that are different.
            # Since we're storing all these files together, we need to warn
            # because clobbering is bad!
            for content_file in contents:
                for split_package, split_contents in new_split_rules.items():
                    for split_file in split_contents:
                        content_file_path = os.path.join(
                            package_path, content_file.lstrip(os.sep))
                        split_file_path = os.path.join(
                            os.path.dirname(package_path), split_package,
                            split_file.lstrip(os.sep))
                        if (content_file == split_file
                                and os.path.isfile(content_file_path)
                                and not filecmp.cmp(content_file_path,
                                                    split_file_path)):
                            bad_overlaps.add(content_file)

            # Store /DEBIAN metadata for each package.
            # DEBIAN/control goes into bst.dpkg-data.<package>.control
            controlpath = os.path.join(package_path, "DEBIAN", "control")
            if not os.path.exists(controlpath):
                raise ElementError(
                    "{}: package {} doesn't have a DEBIAN/control in {}!".
                    format(self.name, package, package_path))
            with open(controlpath, "r") as f:
                controldata = f.read()
            new_dpkg_data[package] = {"control": controldata, "name": package}

            # DEBIAN/{pre,post}{inst,rm} scripts go into bst.package-scripts.<package>.<script>
            scriptfiles = ["preinst", "postinst", "prerm", "postrm"]
            for s in scriptfiles:
                path = os.path.join(package_path, "DEBIAN", s)
                if os.path.exists(path):
                    if package not in new_package_scripts:
                        new_package_scripts[package] = {}
                    with open(path, "r") as f:
                        data = f.read()
                    new_package_scripts[package][s] = data

        bstdata = self.get_public_data("bst")
        bstdata["split-rules"] = new_split_rules
        bstdata["dpkg-data"] = new_dpkg_data
        if new_package_scripts:
            bstdata["package-scripts"] = new_package_scripts

        self.set_public_data("bst", bstdata)

        if bad_overlaps:
            self.warn('Destructive overlaps found in some files',
                      detail='\n'.join(bad_overlaps))

        return collectdir
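
For orientation, the public data assembled above has roughly this shape (a hypothetical single-package illustration, not taken from the source):

    bst_public_data = {
        "split-rules": {
            "foo": ["/usr/bin/foo", "/usr/share/doc/foo/README"],
        },
        "dpkg-data": {
            "foo": {"control": "Package: foo\n...", "name": "foo"},
        },
        "package-scripts": {
            "foo": {"postinst": "#!/bin/sh\n..."},
        },
    }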
Example #28
    def configure(self, node):
        node.validate_keys(["mode", "gzip", "images", "annotations"])

        self.mode = node.get_str("mode", "oci")
        # FIXME: use an enum with node.get_enum here
        if self.mode not in ["docker", "oci"]:
            raise ElementError(
                '{}: Mode must be "oci" or "docker"'.format(
                    node.get_scalar("mode").get_provenance()
                )
            )

        self.gzip = node.get_bool("gzip", self.mode == "oci")

        if "annotations" not in node:
            self.annotations = None
        else:
            self.annotations = {}
            annotations = node.get_mapping("images")
            for k, value in annotations.items():
                v = self.node_subst_vars(value)
                self.annotations[k] = v

        self.images = []
        for image in node.get_sequence("images"):
            image.validate_keys(
                [
                    "parent",
                    "layer",
                    "architecture",
                    "variant",
                    "os",
                    "os.version",
                    "os.features",
                    "author",
                    "comment",
                    "config",
                    "annotations",
                ]
                + (["tags"] if self.mode == "docker" else [])
            )
            parent = image.get_mapping("parent", None)
            image_value = {}
            if parent:
                parent.validate_keys(["element", "image"])

                parent = {
                    "element": parent.get_str("element"),
                    "image": parent.get_int("image", 0),
                }

                image_value["parent"] = parent
            if "layer" in image:
                image_value["layer"] = self.node_subst_sequence_vars(
                    image.get_sequence("layer")
                )

            image_value["architecture"] = self.node_subst_vars(
                image.get_scalar("architecture")
            )

            if "tags" in image:
                image_value["tags"] = self.node_subst_sequence_vars(
                    image.get_sequence("tags")
                )

            image_value["os"] = self.node_subst_vars(image.get_scalar("os"))

            if "os.version" in image:
                image_value["os.version"] = self.node_subst_vars(
                    image.get_scalar("os.version")
                )
            if "os.features" in image:
                image_value["os.features"] = self.node_subst_sequence_vars(
                    image.get_sequence("os.features")
                )
            if "os.features" in image:
                image_value["variant"] = self.node_subst_vars(
                    image.get_scalar("variant")
                )

            if "author" in image:
                image_value["author"] = self.node_subst_vars(
                    image.get_scalar("author")
                )

            if "comment" in image:
                image_value["comment"] = self.node_subst_vars(
                    image.get_scalar("comment")
                )

            if "config" in image:
                config = image.get_mapping("config")

                common_config = [
                    "User",
                    "ExposedPorts",
                    "Env",
                    "Entrypoint",
                    "Cmd",
                    "Volumes",
                    "WorkingDir",
                ]
                docker_config = [
                    "Memory",
                    "MemorySwap",
                    "CpuShares",
                    "Healthcheck",
                ]
                oci_config = ["Labels", "StopSignal"]

                config.validate_keys(
                    common_config
                    + (docker_config if self.mode == "docker" else oci_config)
                )

                config_value = {}
                for member in ["User", "WorkingDir", "StopSignal"]:
                    if member in config:
                        config_value[member] = self.node_subst_vars(
                            config.get_scalar(member)
                        )

                for member in ["Memory", "MemorySwap", "CpuShares"]:
                    if member in config:
                        config_value[member] = int(
                            self.node_subst_vars(config.get_scalar(member))
                        )

                for member in [
                    "ExposedPorts",
                    "Volumes",
                    "Env",
                    "Entrypoint",
                    "Cmd",
                ]:
                    if member in config:
                        config_value[member] = self.node_subst_sequence_vars(
                            config.get_sequence(member)
                        )

                if "Labels" in config:
                    labels = config.get_mapping("Labels")
                    config_value["Labels"] = {}
                    for k, v in labels.items():
                        config_value["Labels"][k] = v

                if "Healthcheck" in config:
                    healthcheck = config.get_mapping("Healthcheck")
                    healthcheck.validate_keys(
                        ["Test", "Interval", "Timeout", "Retries"]
                    )
                    config_value["Healthcheck"] = {}
                    if "Test" in healthcheck:
                        config_value["Healthcheck"][
                            "Test"
                        ] = self.node_subst_sequence_vars(
                            healthcheck.get_sequence("Test")
                        )
                    for member in ["Interval", "Timeout", "Retries"]:
                        if member in healthcheck:
                            config_value["Healthcheck"][member] = int(
                                self.node_subst_sequence_vars(
                                    healthcheck.get_scalar(member)
                                )
                            )

                image_value["config"] = config_value
            if "annotations" in image:
                image_value["annotations"] = {}
                annotations = image.get_mapping("annotations")
                for k, value in annotations.items():
                    v = self.node_subst_vars(value)
                    image_value["annotations"][k] = v

            self.images.append(image_value)
Example #29
    def _build_image(self, sandbox, image, root, output):
        if "layer" in image:
            if root.exists("parent_checkout"):
                root.remove("parent_checkout", recursive=True)
            parent_checkout = root.descend("parent_checkout", create=True)

        layer_descs = []
        layer_files = []
        diff_ids = []
        history = None
        legacy_parent = None

        config = {}
        if "author" in image:
            config["author"] = image["author"]
        config["architecture"] = image["architecture"]
        config["os"] = image["os"]
        if "config" in image:
            config["config"] = {}
            for k, v in image["config"].items():
                if k in ["ExposedPorts", "Volumes"]:
                    config["config"][k] = {}
                    for value in v:
                        config["config"][k][value] = {}
                else:
                    config["config"][k] = v

        if "parent" in image:
            if root.exists("parent"):
                root.remove("parent", recursive=True)
            parent = root.descend("parent", create=True)
            parent_dep = self.search(image["parent"]["element"])
            if not parent_dep:
                raise ElementError(
                    "{}: Element not in dependencies: {}".format(
                        self, image["parent"]["element"]
                    )
                )

            parent_dep.stage_dependency_artifacts(sandbox, path="parent")
            if not parent.exists("index.json"):
                with parent.open_file(
                    "manifest.json",
                    mode="r",
                ) as f:
                    parent_index = json.load(f)
                parent_image = parent_index[image["parent"]["image"]]
                layers = parent_image["Layers"]

                with parent.open_file(
                    *parent_image["Config"].split("/"), mode="r"
                ) as f:
                    image_config = json.load(f)
                diff_ids = image_config["rootfs"]["diff_ids"]

                if "history" in image_config:
                    history = image_config["history"]

                for i, layer in enumerate(layers):
                    _, diff_id = diff_ids[i].split(":", 1)
                    with parent.open_file(
                        *layer.split("/"), mode="rb"
                    ) as origblob:
                        if self.gzip:
                            targz_blob = blob(
                                output,
                                media_type="application/vnd.oci.image.layer.v1.tar+gzip",
                                mode=self.mode,
                            )
                            with targz_blob.create() as gzipfile:
                                with gzip.GzipFile(
                                    filename=diff_id,
                                    fileobj=gzipfile,
                                    mode="wb",
                                    mtime=1320937200,
                                ) as gz:
                                    shutil.copyfileobj(origblob, gz)
                            layer_descs.append(targz_blob.descriptor)
                            layer_files.append(targz_blob.path)
                            legacy_parent = targz_blob.legacy_id
                        else:
                            legacy_config = {"os": image["os"]}
                            if legacy_parent:
                                legacy_config["parent"] = legacy_parent
                            tar_blob = blob(
                                output,
                                media_type="application/vnd.oci.image.layer.v1.tar",
                                mode=self.mode,
                                legacy_config=legacy_config,
                            )
                            with tar_blob.create() as newfile:
                                shutil.copyfileobj(origblob, newfile)
                            layer_descs.append(tar_blob.descriptor)
                            layer_files.append(tar_blob.path)
                            legacy_parent = tar_blob.legacy_id
            else:
                with parent.open_file("index.json", mode="r") as f:
                    parent_index = json.load(f)
                parent_image_desc = parent_index["manifests"][
                    image["parent"]["image"]
                ]
                algo, h = parent_image_desc["digest"].split(":", 1)
                with parent.open_file(
                    "blobs", *algo.split("/"), *h.split("/"), mode="r"
                ) as f:
                    image_manifest = json.load(f)
                algo, h = image_manifest["config"]["digest"].split(":", 1)
                with parent.open_file(
                    "blobs", *algo.split("/"), *h.split("/"), mode="r"
                ) as f:
                    image_config = json.load(f)
                diff_ids = image_config["rootfs"]["diff_ids"]
                if "history" in image_config:
                    history = image_config["history"]
                for i, layer in enumerate(image_manifest["layers"]):
                    _, diff_id = diff_ids[i].split(":", 1)
                    algo, h = layer["digest"].split(":", 1)
                    origfile = ["blobs", *algo.split("/"), *h.split("/")]
                    with ExitStack() as e:
                        if "layer" not in image and i + 1 == len(
                            image_manifest["layers"]
                        ):
                            # The case were we do not add a layer, the last imported layer has to be fully reconfigured
                            legacy_config = {}
                            legacy_config.update(config)
                            if legacy_parent:
                                legacy_config["parent"] = legacy_parent
                        else:
                            legacy_config = {"os": image["os"]}
                        if legacy_parent:
                            legacy_config["parent"] = legacy_parent
                        if self.gzip:
                            output_blob = blob(
                                output,
                                media_type="application/vnd.oci.image.layer.v1.tar+gzip",
                                mode=self.mode,
                            )
                        else:
                            output_blob = blob(
                                output,
                                media_type="application/vnd.oci.image.layer.v1.tar",
                                mode=self.mode,
                                legacy_config=legacy_config,
                            )
                        outp = e.enter_context(output_blob.create())
                        inp = e.enter_context(
                            parent.open_file(*origfile, mode="rb")
                        )
                        if layer["mediaType"].endswith("+gzip"):
                            if self.gzip:
                                shutil.copyfileobj(inp, outp)
                            else:
                                gz = e.enter_context(
                                    gzip.open(filename=inp, mode="rb")
                                )
                                shutil.copyfileobj(gz, outp)
                        else:
                            if self.gzip:
                                gz = e.enter_context(
                                    gzip.GzipFile(
                                        filename=diff_id,
                                        fileobj=outp,
                                        mode="wb",
                                        mtime=1320937200,
                                    )
                                )
                                shutil.copyfileobj(inp, gz)
                            else:
                                shutil.copyfileobj(inp, outp)

                    layer_descs.append(output_blob.descriptor)
                    layer_files.append(output_blob.path)
                    legacy_parent = output_blob.legacy_id

        if "parent" in image and "layer" in image:
            unpacked = False
            if isinstance(parent_dep, OciElement):
                # Here we read the parent configuration to checkout
                # the artifact which is much faster than unpacking the tar
                # files.
                layers = []
                parent_image = image["parent"]["image"]
                for layer in parent_dep.images[parent_image]["layer"]:
                    layer_dep = parent_dep.search(layer)
                    if not layer_dep:
                        raise ElementError(
                            "{}: Element not in dependencies: {}".format(
                                parent_dep, layer
                            )
                        )

                    # We need to verify dependencies: if a layer is not in
                    # the current element's dependencies, we cannot safely
                    # assume it is cached. The parent could be cached while
                    # its dependencies were removed or never pulled.
                    if layer_dep != self.search(layer):
                        self.warn(
                            "In order to optimize building of {}, you should add {} as a build dependency".format(
                                self.name, layer
                            )
                        )
                        layers = None
                        break

                    layers.append(layer_dep)
                if layers is not None:
                    with self.timed_activity(
                        "Checking out layer from {}".format(parent_dep.name)
                    ):
                        for layer_dep in layers:
                            layer_dep.stage_dependency_artifacts(
                                sandbox, path="parent_checkout"
                            )
                        unpacked = True

            if not unpacked:
                for layer in layer_files:
                    if self.gzip:
                        mode = "r:gz"
                    else:
                        mode = "r:"
                    with self.timed_activity(
                        "Decompressing layer {}".format(layer)
                    ):
                        with output.open_file(
                            layer, mode="rb"
                        ) as f, tarfile.open(fileobj=f, mode=mode) as t:
                            members = []
                            for info in t.getmembers():
                                if "/../" in info.name:
                                    continue
                                if info.name.startswith("../"):
                                    continue

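                                # Docker/OCI whiteout conventions: a
                                # ".wh..wh..opq" entry marks its directory
                                # as opaque (previous contents dropped),
                                # and ".wh.<name>" deletes <name> from the
                                # lower layers.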
                                dirname, basename = os.path.split(info.name)
                                if basename == ".wh..wh..opq":
                                    # Replace with empty directory
                                    parent_checkout.remove(
                                        *dirname.split("/"), recursive=True
                                    )
                                    parent_checkout.descend(
                                        *dirname.split("/"), create=True
                                    )
                                elif basename.startswith(".wh."):
                                    parent_checkout.remove(
                                        *dirname.split("/"),
                                        basename[4:],
                                        recursive=True,
                                    )
                                else:
                                    members.append(info)

                            t.extractall(path=parent_checkout, members=members)

        legacy_config = {}
        legacy_config.update(config)
        if legacy_parent:
            legacy_config["parent"] = legacy_parent

        if "layer" in image:
            for name in image["layer"]:
                dep = self.search(name)
                dep.stage_dependency_artifacts(sandbox, path="layer")

            layer = root.descend("layer")
            with self.timed_activity("Transforming into layer"):

                def create_whiteouts(parentdir, layerdir):
                    for f in parentdir:
                        if not layerdir.exists(f):
                            with layerdir.open_file(".wh." + f, mode="w"):
                                pass
                        elif parentdir.isdir(f) and layerdir.isdir(f):
                            # Recurse into subdirectory
                            create_whiteouts(
                                parentdir.descend(f), layerdir.descend(f)
                            )

                create_whiteouts(parent_checkout, layer)

                if "parent" in image:

                    def remove_duplicates(parentdir, layerdir):
                        for f in list(layerdir):
                            if not parentdir.exists(f):
                                pass
                            elif parentdir.isdir(f) and layerdir.isdir(f):
                                # Recurse into subdirectory
                                remove_duplicates(
                                    parentdir.descend(f), layerdir.descend(f)
                                )
                            else:
                                old_st = parentdir.stat(f)
                                new_st = layerdir.stat(f)
                                if old_st.st_mode != new_st.st_mode:
                                    continue
                                if int(old_st.st_mtime) != int(
                                    new_st.st_mtime
                                ):
                                    continue
                                if stat.S_ISLNK(old_st.st_mode):
                                    if parentdir.readlink(
                                        f
                                    ) == layerdir.readlink(f):
                                        layerdir.remove(f)
                                else:
                                    if parentdir.file_digest(
                                        f
                                    ) == layerdir.file_digest(f):
                                        layerdir.remove(f)

                    remove_duplicates(parent_checkout, layer)

            with tempfile.TemporaryFile(mode="w+b") as tfile:
                with tarfile.open(fileobj=tfile, mode="w:") as t:
                    with self.timed_activity("Building layer tar"):
                        layer.export_to_tar(t, "")
                tfile.seek(0)
                tar_hash = hashlib.sha256()
                with self.timed_activity("Hashing layer"):
                    while True:
                        data = tfile.read(16 * 1024)
                        if not data:
                            break
                        tar_hash.update(data)
                tfile.seek(0)
                if self.gzip:
                    targz_blob = blob(
                        output,
                        media_type="application/vnd.oci.image.layer.v1.tar+gzip",
                        mode=self.mode,
                    )
                    with self.timed_activity("Compressing layer"):
                        with targz_blob.create() as gzipfile:
                            with gzip.GzipFile(
                                filename=tar_hash.hexdigest(),
                                fileobj=gzipfile,
                                mode="wb",
                                mtime=1320937200,
                            ) as gz:
                                shutil.copyfileobj(tfile, gz)
                    layer_descs.append(targz_blob.descriptor)
                else:
                    copied_blob = blob(
                        output,
                        media_type="application/vnd.oci.image.layer.v1.tar",
                        mode=self.mode,
                        legacy_config=legacy_config,
                    )
                    with copied_blob.create() as copiedfile:
                        shutil.copyfileobj(tfile, copiedfile)
                    layer_descs.append(copied_blob.descriptor)
                    legacy_parent = copied_blob.legacy_id

            diff_ids.append("sha256:{}".format(tar_hash.hexdigest()))

        if not history:
            history = []
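        # Every build appends one history entry; builds that add no
        # filesystem layer must be flagged "empty_layer" so that the
        # history lines up with rootfs.diff_ids.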
        hist_entry = {}
        if "layer" not in image:
            hist_entry["empty_layer"] = True
        if "author" in image:
            hist_entry["author"] = image["author"]
        if "comment" in image:
            hist_entry["comment"] = image["comment"]
        history.append(hist_entry)

        config["rootfs"] = {"type": "layers", "diff_ids": diff_ids}
        config["history"] = history
        config_blob = blob(
            output,
            media_type="application/vnd.oci.image.config.v1+json",
            text=True,
            mode=self.mode,
        )
        with config_blob.create() as configfile:
            json.dump(config, configfile)

        if self.mode == "docker":
            manifest = {
                "Config": config_blob.descriptor,
                "Layers": layer_descs,
            }
            legacy_repositories = {}
            if "tags" in image:
                manifest["RepoTags"] = image["tags"]
                for tag in image["tags"]:
                    name, version = tag.split(":", 1)
                    if name not in legacy_repositories:
                        legacy_repositories[name] = {}
                    legacy_repositories[name][version] = legacy_parent

            return manifest, legacy_repositories
        else:
            manifest = {"schemaVersion": 2}
            manifest["layers"] = layer_descs
            manifest["config"] = config_blob.descriptor
            if "annotations" in image:
                manifest["annotations"] = image["annotations"]
            manifest_blob = blob(
                output,
                media_type="application/vnd.oci.image.manifest.v1+json",
                text=True,
            )
            with manifest_blob.create() as manifestfile:
                json.dump(manifest, manifestfile)
            platform = {
                "os": image["os"],
                "architecture": image["architecture"],
            }
            if "os.version" in image:
                platform["os.version"] = image["os.version"]
            if "os.features" in image:
                platform["os.features"] = image["os.features"]
            if "variant" in image:
                platform["variant"] = image["variant"]
            manifest_blob.descriptor["platform"] = platform
            return manifest_blob.descriptor, {}
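
A note on Example #29's hashing order: an OCI layer has two identities. The diff_id recorded in the image config is the SHA-256 of the uncompressed tar stream, while the digest in the manifest descriptor covers the blob as stored (gzip-compressed when self.gzip is set). That is why the code hashes the temporary tar file before compressing it. A minimal self-contained sketch of that relationship (hypothetical helper, not part of the plugin):

import gzip
import hashlib
import io


def layer_digests(tar_bytes, compress=True):
    """Return (diff_id, blob_digest) for a layer given its raw tar bytes."""
    # diff_id is always computed over the uncompressed tar stream.
    diff_id = "sha256:" + hashlib.sha256(tar_bytes).hexdigest()

    if compress:
        buf = io.BytesIO()
        # A fixed mtime, as in the plugin, keeps the gzip output reproducible.
        with gzip.GzipFile(fileobj=buf, mode="wb", mtime=1320937200) as gz:
            gz.write(tar_bytes)
        stored = buf.getvalue()
    else:
        stored = tar_bytes

    # The manifest descriptor digest covers the stored (compressed) bytes.
    blob_digest = "sha256:" + hashlib.sha256(stored).hexdigest()
    return diff_id, blob_digest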
Example #30
    def stage(self, sandbox):
        super().stage(sandbox)
        # For each package, create a subdirectory in build-root, copy that
        # package's files into it, then reconstitute the DEBIAN/ metadata files.
        input_elm = self.search(Scope.BUILD, self.__input)
        if not input_elm:
            raise ElementError(
                "{}: Failed to find input element {} in build-depends".format(
                    self.name, self.__input))
        bstdata = input_elm.get_public_data('bst')
        if "dpkg-data" not in bstdata:
            raise ElementError(
                "{}: input element {} does not have any bst.dpkg-data public data"
                .format(self.name, self.__input))
        for package, package_data in self.node_items(bstdata['dpkg-data']):
            package_name = package_data.get(
                "name", "{}-{}".format(input_elm.normal_name, package))
            if not ("split-rules" in bstdata
                    and package in bstdata["split-rules"]):
                raise ElementError(
                    "{}: Input element {} does not have bst.split-rules.{}".
                    format(self.name, self.__input, package))
            package_splits = bstdata['split-rules'][package]
            package_files = input_elm.compute_manifest(include=[package])
            src = os.path.join(sandbox.get_directory(),
                               self.get_variable("build-root").lstrip(os.sep))
            dst = os.path.join(src, package)
            os.makedirs(dst, exist_ok=True)
            utils.link_files(src, dst, files=package_files)

            # Create this dir. If it already exists,
            # something unexpected has happened.
            debiandir = os.path.join(dst, "DEBIAN")
            os.makedirs(debiandir)

            # Recreate the DEBIAN files.
            # control is extracted verbatim, and is mandatory.
            if "control" not in package_data:
                raise ElementError(
                    "{}: Cannot reconstitute package {}".format(
                        self.name, package),
                    detail="There is no public.bst.dpkg-data.{}.control".
                    format(package))
            controlpath = os.path.join(debiandir, "control")
            controltext = package_data["control"]
            # Slightly ugly way of renaming the package
            controltext = re.sub(r"^Package:\s*\S+",
                                 "Package: {}".format(package_name),
                                 controltext)
            with open(controlpath, "w") as f:
                f.write(controltext)

            # Generate a DEBIAN/md5sums file from the artifact
            md5sums = {}
            for split in package_files:
                filepath = os.path.join(src, split.lstrip(os.sep))
                if os.path.isfile(filepath):
                    md5sums[split] = md5sum_file(filepath)
            md5sumspath = os.path.join(debiandir, "md5sums")
            with open(md5sumspath, "w") as f:
                for path, md5sum in md5sums.items():
                    f.write("{}  {}\n".format(md5sum, path))

            # scripts may exist
            if ("package-scripts" in bstdata
                    and package in bstdata["package-scripts"]):
                for script in ["postinst", "preinst", "postrm", "prerm"]:
                    if script in bstdata["package-scripts"][package]:
                        filepath = os.path.join(debiandir, script)
                        with open(filepath, "w") as f:
                            f.write(
                                bstdata["package-scripts"][package][script])
                        os.chmod(filepath, 0o755)
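
Example #30 relies on an md5sum_file helper defined elsewhere in the plugin. A minimal sketch of what such a helper could look like, assuming it simply returns the hex MD5 digest of a file (chunked reads avoid loading large artifact files into memory):

import hashlib


def md5sum_file(path):
    """Return the hex MD5 digest of the file at path (assumed helper)."""
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(16 * 1024), b""):
            h.update(chunk)
    return h.hexdigest()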