Example #1
    def run(self):
        self.source = get_flatpak_source_info(self.workflow)
        if self.source is None:
            raise RuntimeError(
                "flatpak_create_dockerfile must be run before flatpak_create_oci"
            )

        self.builder = FlatpakBuilder(self.source,
                                      self.workflow.source.workdir,
                                      'var/tmp/flatpak-build',
                                      parse_manifest=parse_rpm_output)

        tarred_filesystem, manifest = self._export_filesystem()
        self.log.info('filesystem tarfile written to %s', tarred_filesystem)
        self.log.info('manifest written to %s', manifest)

        image_components = self.builder.get_components(manifest)
        self.workflow.image_components = image_components

        ref_name, outfile, tarred_outfile = self.builder.build_container(
            tarred_filesystem)

        metadata = get_exported_image_metadata(outfile, IMAGE_TYPE_OCI)
        metadata['ref_name'] = ref_name
        self.workflow.exported_image_sequence.append(metadata)

        self.log.info('OCI image is available as %s', outfile)

        metadata = get_exported_image_metadata(tarred_outfile,
                                               IMAGE_TYPE_OCI_TAR)
        metadata['ref_name'] = ref_name
        self.workflow.exported_image_sequence.append(metadata)

        self.log.info('OCI tarfile is available as %s', tarred_outfile)
Example #2
    def run(self):
        self.source = get_flatpak_source_info(self.workflow)
        if self.source is None:
            raise RuntimeError("flatpak_create_dockerfile must be run before flatpak_create_oci")

        self.builder = FlatpakBuilder(self.source, self.workflow.source.workdir,
                                      'var/tmp/flatpak-build',
                                      parse_manifest=parse_rpm_output)

        tarred_filesystem, manifest = self._export_filesystem()
        self.log.info('filesystem tarfile written to %s', tarred_filesystem)
        self.log.info('manifest written to %s', manifest)

        image_components = self.builder.get_components(manifest)
        self.workflow.image_components = image_components

        ref_name, outfile, tarred_outfile = self.builder.build_container(tarred_filesystem)

        metadata = get_exported_image_metadata(outfile, IMAGE_TYPE_OCI)
        metadata['ref_name'] = ref_name
        self.workflow.exported_image_sequence.append(metadata)

        self.log.info('OCI image is available as %s', outfile)

        metadata = get_exported_image_metadata(tarred_outfile, IMAGE_TYPE_OCI_TAR)
        metadata['ref_name'] = ref_name
        self.workflow.exported_image_sequence.append(metadata)

        self.log.info('OCI tarfile is available as %s', tarred_outfile)
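Every example here revolves around get_exported_image_metadata(path, image_type). As a rough sketch of its contract (an assumption reconstructed from the keys these examples read back -- 'path', 'type', 'size' -- not the verbatim helper; callers add 'ref_name' and 'uncompressed_size' themselves):

def get_exported_image_metadata(path, image_type):
    # sketch: collect the fields the plugins above consume
    metadata = {'path': path, 'type': image_type}
    if image_type != IMAGE_TYPE_OCI:  # an OCI "image" here is a directory, not a single file
        metadata['size'] = os.path.getsize(path)
        # the real helper is also expected to record content checksums here
    return metadata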
Example #3
    def run(self):
        source = get_flatpak_source_info(self.workflow)
        if source is None:
            raise RuntimeError(
                "flatpak_create_dockerfile must be run before flatpak_create_oci"
            )

        self.builder = FlatpakBuilder(source,
                                      self.workflow.source.workdir,
                                      'var/tmp/flatpak-build',
                                      parse_manifest=parse_rpm_output,
                                      flatpak_metadata=self.flatpak_metadata)

        df_labels = df_parser(self.workflow.builder.df_path,
                              workflow=self.workflow).labels
        self.builder.add_labels(df_labels)

        tarred_filesystem, manifest = self._export_filesystem()
        self.log.info('filesystem tarfile written to %s', tarred_filesystem)
        self.log.info('manifest written to %s', manifest)

        image_components = self.builder.get_components(manifest)
        self.workflow.image_components = image_components

        ref_name, outfile, tarred_outfile = self.builder.build_container(
            tarred_filesystem)

        self.log.info('Marking filesystem image "%s" for removal',
                      self.workflow.builder.image_id)
        defer_removal(self.workflow, self.workflow.builder.image_id)

        image_id = self._get_oci_image_id(outfile)
        self.log.info('New OCI image ID is %s', image_id)
        self.workflow.builder.image_id = image_id

        labels = Labels(df_labels)
        _, image_name = labels.get_name_and_value(Labels.LABEL_TYPE_NAME)
        _, image_version = labels.get_name_and_value(Labels.LABEL_TYPE_VERSION)
        _, image_release = labels.get_name_and_value(Labels.LABEL_TYPE_RELEASE)

        name = '{}-{}'.format(self.key, image_name)
        tag = '{}-{}'.format(image_version, image_release)
        # The OCI id is tracked by the builder. The image will be removed in the exit phase
        # No need to mark it for removal after pushing to the local storage
        self._copy_oci_to_local_storage(outfile, name, tag)

        metadata = get_exported_image_metadata(outfile, IMAGE_TYPE_OCI)
        metadata['ref_name'] = ref_name
        self.workflow.exported_image_sequence.append(metadata)

        self.log.info('OCI image is available as %s', outfile)

        metadata = get_exported_image_metadata(tarred_outfile,
                                               IMAGE_TYPE_OCI_TAR)
        metadata['ref_name'] = ref_name
        self.workflow.exported_image_sequence.append(metadata)

        self.log.info('OCI tarfile is available as %s', tarred_outfile)
Example #4
    def run(self):
        self.source = get_flatpak_source_info(self.workflow)
        if self.source is None:
            raise RuntimeError(
                "flatpak_create_dockerfile must be run before flatpak_create_oci"
            )

        tarred_filesystem, manifest = self._export_filesystem()
        self.log.info('filesystem tarfile written to %s', tarred_filesystem)
        self.log.info('manifest written to %s', manifest)

        all_components = self._get_components(manifest)
        if self.source.runtime:
            image_components = all_components
        else:
            image_components = self._filter_app_manifest(all_components)

        self.log.info("Components:\n%s",
                      "\n".join("        {name}-{epoch}:{version}-{release}.{arch}.rpm"
                                .format(**c) for c in image_components))

        self.workflow.image_components = image_components

        outfile = os.path.join(self.workflow.source.workdir,
                               'flatpak-oci-image')

        if self.source.runtime:
            ref_name = self._create_runtime_oci(tarred_filesystem, outfile)
        else:
            ref_name = self._create_app_oci(tarred_filesystem, outfile)

        metadata = get_exported_image_metadata(outfile, IMAGE_TYPE_OCI)
        metadata['ref_name'] = ref_name
        self.workflow.exported_image_sequence.append(metadata)

        self.log.info('OCI image is available as %s', outfile)

        tarred_outfile = outfile + '.tar'
        with tarfile.TarFile(tarred_outfile, "w") as tf:
            for f in os.listdir(outfile):
                tf.add(os.path.join(outfile, f), f)

        metadata = get_exported_image_metadata(tarred_outfile,
                                               IMAGE_TYPE_OCI_TAR)
        metadata['ref_name'] = ref_name
        self.workflow.exported_image_sequence.append(metadata)

        self.log.info('OCI tarfile is available as %s', tarred_outfile)
Example #5
    def run(self):
        if self.load_exported_image:
            if len(self.workflow.exported_image_sequence) == 0:
                raise RuntimeError(
                    'load_exported_image used, but no exported image')
            image_metadata = self.workflow.exported_image_sequence[-1]
            image = image_metadata.get('path')
            image_type = image_metadata.get('type')
            self.log.info('preparing to compress image %s', image)
            with open(image, 'rb') as image_stream:
                outfile = self._compress_image_stream(image_stream)
        else:
            image = self.workflow.image
            image_type = IMAGE_TYPE_DOCKER_ARCHIVE
            self.log.info('fetching image %s from docker', image)
            with self.tasker.d.get_image(image) as image_stream:
                outfile = self._compress_image_stream(image_stream)
        metadata = get_exported_image_metadata(outfile, image_type)

        if self.uncompressed_size != 0:
            metadata['uncompressed_size'] = self.uncompressed_size
            savings = 1 - metadata['size'] / float(
                metadata['uncompressed_size'])
            self.log.debug(
                'uncompressed: %s, compressed: %s, ratio: %.2f %% saved',
                human_size(metadata['uncompressed_size']),
                human_size(metadata['size']), 100 * savings)

        self.workflow.exported_image_sequence.append(metadata)
        self.log.info('compressed image is available as %s', outfile)
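The savings figure logged above is just the fractional size reduction: savings = 1 - compressed_size / uncompressed_size. A quick worked example with hypothetical numbers:

uncompressed_size = 400 * 1024 * 1024     # 400 MiB docker-archive tarball
size = 100 * 1024 * 1024                  # 100 MiB after gzip compression
savings = 1 - size / float(uncompressed_size)
print('%.2f %% saved' % (100 * savings))  # -> 75.00 % saved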
Example #6
    def run(self):
        if self.workflow.build_result.skip_layer_squash:
            return  # enable build plugins to prevent unnecessary squashes
        if self.save_archive:
            output_path = os.path.join(self.workflow.source.workdir,
                                       EXPORTED_SQUASHED_IMAGE_NAME)
            metadata = {"path": output_path}
        else:
            output_path = None

        # Squash the image and output tarfile
        # If the parameter dont_load is set to True, the squashed image won't be
        # loaded into the Docker daemon. If it's set to False, it will be loaded.
        new_id = Squash(log=self.log,
                        image=self.image,
                        from_layer=self.from_layer,
                        tag=self.tag,
                        output_path=output_path,
                        load_image=not self.dont_load).run()

        if ':' not in new_id:
            # Older versions of the daemon do not include the prefix
            new_id = 'sha256:{}'.format(new_id)

        if not self.dont_load:
            self.workflow.builder.image_id = new_id

        if self.save_archive:
            metadata.update(
                get_exported_image_metadata(output_path,
                                            IMAGE_TYPE_DOCKER_ARCHIVE))
            self.workflow.exported_image_sequence.append(metadata)
        defer_removal(self.workflow, self.image)
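The Squash class used here comes from the docker-squash project; the plugin is a thin wrapper around it. A standalone sketch with hypothetical values (the import path is an assumption; the keyword arguments mirror the ones the plugin passes):

import logging

from docker_squash.squash import Squash  # assumed import path

new_id = Squash(log=logging.getLogger('squash'),
                image='sha256:<built-image-id>',        # hypothetical image to squash
                from_layer='sha256:<base-image-id>',    # squash everything above this layer
                tag='registry.example.com/app:latest',  # tag for the squashed result
                output_path='/tmp/image.tar',           # also write a docker-archive tarball
                load_image=True).run()                  # load the result back into the daemon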
Example #7
    def run(self):
        if self.load_exported_image:
            if len(self.workflow.exported_image_sequence) == 0:
                raise RuntimeError('load_exported_image used, but no exported image')
            image = self.workflow.exported_image_sequence[-1].get('path')
            self.log.info('preparing to compress image %s', image)
            with open(image, 'rb') as image_stream:
                outfile = self._compress_image_stream(image_stream)
        else:
            image = self.workflow.image
            self.log.info('fetching image %s from docker', image)
            with self.tasker.d.get_image(image) as image_stream:
                outfile = self._compress_image_stream(image_stream)
        metadata = get_exported_image_metadata(outfile)

        if self.uncompressed_size != 0:
            metadata['uncompressed_size'] = self.uncompressed_size
            savings = 1 - metadata['size'] / float(metadata['uncompressed_size'])
            self.log.debug('uncompressed: %s, compressed: %s, ratio: %.2f %% saved',
                           human_size(metadata['uncompressed_size']),
                           human_size(metadata['size']),
                           100*savings)

        self.workflow.exported_image_sequence.append(metadata)
        self.log.info('compressed image is available as %s', outfile)
Example #8
    def run(self):
        if self.workflow.build_result.skip_layer_squash:
            return  # enable build plugins to prevent unnecessary squashes
        if self.save_archive:
            output_path = os.path.join(self.workflow.source.workdir, EXPORTED_SQUASHED_IMAGE_NAME)
            metadata = {"path": output_path}
        else:
            output_path = None

        # Squash the image and output tarfile
        # If the parameter dont_load is set to True, the squashed image won't be
        # loaded into the Docker daemon. If it's set to False, it will be loaded.
        new_id = Squash(log=self.log, image=self.image, from_layer=self.from_layer,
                        tag=self.tag, output_path=output_path, load_image=not self.dont_load).run()

        if ':' not in new_id:
            # Older versions of the daemon do not include the prefix
            new_id = 'sha256:{}'.format(new_id)

        if not self.dont_load:
            self.workflow.builder.image_id = new_id

        if self.save_archive:
            metadata.update(get_exported_image_metadata(output_path, IMAGE_TYPE_DOCKER_ARCHIVE))
            self.workflow.exported_image_sequence.append(metadata)
        defer_removal(self.workflow, self.image)
Example #9
    def run(self):
        if self.load_exported_image and len(
                self.workflow.exported_image_sequence) > 0:
            image_metadata = self.workflow.exported_image_sequence[-1]
            image = image_metadata.get('path')
            image_type = image_metadata.get('type')
            self.log.info('preparing to compress image %s', image)
            with open(image, 'rb') as image_stream:
                outfile = self._compress_image_stream(image_stream)
        else:
            if self.source_build:
                self.log.info('skipping, no exported source image to compress')
                return
            image = self.workflow.image
            image_type = IMAGE_TYPE_DOCKER_ARCHIVE
            self.log.info('fetching image %s from docker', image)
            with self.tasker.get_image(image) as image_stream:
                outfile = self._compress_image_stream(image_stream)
        metadata = get_exported_image_metadata(outfile, image_type)

        if self.uncompressed_size != 0:
            metadata['uncompressed_size'] = self.uncompressed_size
            savings = 1 - metadata['size'] / metadata['uncompressed_size']
            self.log.debug(
                'uncompressed: %s, compressed: %s, ratio: %.2f %% saved',
                human_size(metadata['uncompressed_size']),
                human_size(metadata['size']), 100 * savings)

        self.workflow.exported_image_sequence.append(metadata)
        self.log.info('compressed image is available as %s', outfile)
Example #10
    def run(self):
        if self.load_exported_image and len(self.workflow.exported_image_sequence) > 0:
            image_metadata = self.workflow.exported_image_sequence[-1]
            image = image_metadata.get('path')
            image_type = image_metadata.get('type')
            self.log.info('preparing to compress image %s', image)
            with open(image, 'rb') as image_stream:
                outfile = self._compress_image_stream(image_stream)
        else:
            image = self.workflow.image
            image_type = IMAGE_TYPE_DOCKER_ARCHIVE
            self.log.info('fetching image %s from docker', image)
            with self.tasker.d.get_image(image) as image_stream:
                outfile = self._compress_image_stream(image_stream)
        metadata = get_exported_image_metadata(outfile, image_type)

        if self.uncompressed_size != 0:
            metadata['uncompressed_size'] = self.uncompressed_size
            savings = 1 - metadata['size'] / metadata['uncompressed_size']
            self.log.debug('uncompressed: %s, compressed: %s, ratio: %.2f %% saved',
                           human_size(metadata['uncompressed_size']),
                           human_size(metadata['size']),
                           100*savings)

        self.workflow.exported_image_sequence.append(metadata)
        self.log.info('compressed image is available as %s', outfile)
Example #11
    def run(self):
        metadata = {
            "path": os.path.join(self.workflow.source.workdir,
                                 EXPORTED_SQUASHED_IMAGE_NAME)
        }

        if self.dont_load:
            # squash the image, don't load it back to docker
            Squash(log=self.log,
                   image=self.image,
                   from_layer=self.from_layer,
                   tag=self.tag,
                   output_path=metadata["path"],
                   load_image=False).run()
        else:
            # squash the image and output both tarfile and Docker engine image
            new_id = Squash(log=self.log,
                            image=self.image,
                            from_layer=self.from_layer,
                            tag=self.tag,
                            output_path=metadata["path"],
                            load_image=True).run()
            self.workflow.builder.image_id = new_id

        metadata.update(get_exported_image_metadata(metadata["path"]))
        self.workflow.exported_image_sequence.append(metadata)
        defer_removal(self.workflow, self.image)
Example #12
    def run(self):
        """
        Build image inside current environment using buildah;
        It's expected this may run within a (privileged) OCI container.

        Returns:
            BuildResult
        """
        builder = self.workflow.builder

        image = builder.image.to_str()
        kwargs = dict(stdout=subprocess.PIPE,
                      stderr=subprocess.STDOUT,
                      universal_newlines=True)
        encoding_params = dict(encoding='utf-8', errors='replace')
        kwargs.update(encoding_params)
        ib_process = subprocess.Popen(
            ['buildah', 'bud', '-t', image, builder.df_dir], **kwargs)

        self.log.debug('buildah build has begun; waiting for it to finish')
        output = []
        while True:
            poll = ib_process.poll()
            out = ib_process.stdout.readline()
            if out:
                self.log.info('%s', out.rstrip())
                output.append(out)
            elif poll is not None:
                break

        if ib_process.returncode != 0:
            # in the case of an apparent failure, single out the last line to
            # include in the failure summary.
            err = output[-1] if output else "<buildah had bad exit code but no output>"
            return BuildResult(
                logs=output,
                fail_reason="image build failed (rc={}): {}".format(
                    ib_process.returncode, err),
            )

        image_id = builder.get_built_image_info()['Id']
        if ':' not in image_id:
            # Older versions of the daemon do not include the prefix
            image_id = 'sha256:{}'.format(image_id)

        # since we need no squash, export the image for local operations like squash would have
        self.log.info("fetching image %s from docker", image)
        output_path = os.path.join(self.workflow.source.workdir,
                                   EXPORTED_SQUASHED_IMAGE_NAME)
        # image data is bytes, so the archive must be opened in binary mode
        with open(output_path, "wb") as image_file:
            image_file.write(self.tasker.get_image(image).data)
        img_metadata = get_exported_image_metadata(output_path,
                                                   IMAGE_TYPE_DOCKER_ARCHIVE)
        self.workflow.exported_image_sequence.append(img_metadata)

        return BuildResult(logs=output,
                           image_id=image_id,
                           skip_layer_squash=True)
Example #13
    def run(self):
        self.source = get_flatpak_source_info(self.workflow)
        if self.source is None:
            raise RuntimeError("flatpak_create_dockerfile must be run before flatpak_create_oci")

        tarred_filesystem, manifest = self._export_filesystem()
        self.log.info('filesystem tarfile written to %s', tarred_filesystem)
        self.log.info('manifest written to %s', manifest)

        all_components = self._get_components(manifest)
        if self.source.runtime:
            image_components = self._check_runtime_manifest(all_components)
        else:
            image_components = self._check_app_manifest(all_components)

        self.log.info("Components:\n%s",
                      "\n".join("        {name}-{epoch}:{version}-{release}.{arch}.rpm"
                                .format(**c) for c in image_components))

        self.workflow.image_components = image_components

        outfile = os.path.join(self.workflow.source.workdir, 'flatpak-oci-image')

        if self.source.runtime:
            ref_name = self._create_runtime_oci(tarred_filesystem, outfile)
        else:
            ref_name = self._create_app_oci(tarred_filesystem, outfile)

        metadata = get_exported_image_metadata(outfile, IMAGE_TYPE_OCI)
        metadata['ref_name'] = ref_name
        self.workflow.exported_image_sequence.append(metadata)

        self.log.info('OCI image is available as %s', outfile)

        tarred_outfile = outfile + '.tar'
        with tarfile.TarFile(tarred_outfile, "w") as tf:
            for f in os.listdir(outfile):
                tf.add(os.path.join(outfile, f), f)

        metadata = get_exported_image_metadata(tarred_outfile, IMAGE_TYPE_OCI_TAR)
        metadata['ref_name'] = ref_name
        self.workflow.exported_image_sequence.append(metadata)

        self.log.info('OCI tarfile is available as %s', tarred_outfile)
Example #14
    def build_flatpak_image(self, source, build_dir: BuildDir) -> Dict[str, Any]:
        builder = FlatpakBuilder(source, build_dir.path,
                                 'var/tmp/flatpak-build',
                                 parse_manifest=parse_rpm_output,
                                 flatpak_metadata=self.flatpak_metadata)

        df_labels = build_dir.dockerfile_with_parent_env(
            self.workflow.imageutil.base_image_inspect()
        ).labels

        builder.add_labels(df_labels)

        tmp_dir = tempfile.mkdtemp(dir=build_dir.path)

        image_filesystem = self.workflow.imageutil.extract_filesystem_layer(
            str(build_dir.exported_squashed_image), str(tmp_dir))

        build_dir.exported_squashed_image.unlink()

        filesystem_path = os.path.join(tmp_dir, image_filesystem)

        with open(filesystem_path, 'rb') as f:
            # this part is 'not ideal', but this function seems to be a prerequisite
            # for building the flatpak image since it does the setup for it
            flatpak_filesystem, flatpak_manifest = builder._export_from_stream(f)

        os.remove(filesystem_path)

        self.log.info('filesystem tarfile written to %s', flatpak_filesystem)

        image_rpm_components = builder.get_components(flatpak_manifest)

        ref_name, outfile, outfile_tarred = builder.build_container(flatpak_filesystem)

        os.remove(outfile_tarred)

        metadata = get_exported_image_metadata(outfile, IMAGE_TYPE_OCI)
        metadata['ref_name'] = ref_name

        cmd = ['skopeo', 'copy', 'oci:{path}:{ref_name}'.format(**metadata), '--format=v2s2',
               'docker-archive:{}'.format(str(build_dir.exported_squashed_image))]

        try:
            retries.run_cmd(cmd)
        except subprocess.CalledProcessError as e:
            self.log.error("skopeo copy failed with output:\n%s", e.output)
            raise RuntimeError("skopeo copy failed with output:\n{}".format(e.output)) from e

        self.log.info('OCI image is available as %s', outfile)

        shutil.rmtree(tmp_dir)

        self.workflow.data.image_components[build_dir.platform] = image_rpm_components

        return metadata
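The skopeo call above converts the OCI directory produced by FlatpakBuilder into a Docker schema-2 archive that the rest of the build consumes. Spelled out with hypothetical paths (mirroring the cmd list built above):

cmd = ['skopeo', 'copy',
       'oci:/build/flatpak-oci-image:app/org.example.App/x86_64/stable',  # source: OCI dir + ref_name
       '--format=v2s2',                        # convert to Docker manifest schema 2
       'docker-archive:/build/exported.tar']   # destination: the exported_squashed_image path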
Example #15
    def export_image(self, image_output_dir: Path) -> Dict[str, Union[str, int]]:
        output_path = self.workflow.build_dir.any_platform.exported_squashed_image

        cmd = ['skopeo', 'copy']
        source_img = 'oci:{}'.format(image_output_dir)
        dest_img = 'docker-archive:{}'.format(output_path)
        cmd += [source_img, dest_img]

        try:
            retries.run_cmd(cmd)
        except subprocess.CalledProcessError as e:
            self.log.error("failed to save docker-archive :\n%s", e.output)
            raise
        return get_exported_image_metadata(str(output_path), IMAGE_TYPE_DOCKER_ARCHIVE)
Example #16
    def run(self):
        if self.load_exported_image:
            if len(self.workflow.exported_image_sequence) == 0:
                raise RuntimeError("load_exported_image used, but no exported image")
            image = self.workflow.exported_image_sequence[-1].get("path")
            self.log.info("preparing to compress image %s", image)
            with open(image, "rb") as image_stream:
                outfile = self._compress_image_stream(image_stream)
        else:
            image = self.workflow.image
            self.log.info("fetching image %s from docker", image)
            with self.tasker.d.get_image(image) as image_stream:
                outfile = self._compress_image_stream(image_stream)
        self.workflow.exported_image_sequence.append(get_exported_image_metadata(outfile))
        self.log.info("compressed image is available as %s", outfile)
Example #17
    def download_image(self, build_dir: BuildDir):
        image = self.workflow.data.tag_conf.get_unique_images_with_platform(
            build_dir.platform)[0]
        image_path = str(build_dir.exported_squashed_image)
        image_type = IMAGE_TYPE_DOCKER_ARCHIVE

        self.log.info('fetching image %s', image)
        self.workflow.imageutil.download_image_archive_tarball(
            image, image_path)

        metadata = get_exported_image_metadata(image_path, image_type)

        self.log.info('image for platform:%s available at %s',
                      build_dir.platform, image_path)

        return metadata
Example #18
    def run(self):
        metadata = {"path":
                    os.path.join(self.workflow.source.workdir, EXPORTED_SQUASHED_IMAGE_NAME)}

        if self.dont_load:
            # squash the image, don't load it back to docker
            Squash(log=self.log, image=self.image, from_layer=self.from_layer,
                   tag=self.tag, output_path=metadata["path"], load_image=False).run()
        else:
            # squash the image and output both tarfile and Docker engine image
            new_id = Squash(log=self.log, image=self.image, from_layer=self.from_layer,
                            tag=self.tag, output_path=metadata["path"], load_image=True).run()
            self.workflow.builder.image_id = new_id

        metadata.update(get_exported_image_metadata(metadata["path"]))
        self.workflow.exported_image_sequence.append(metadata)
        defer_removal(self.workflow, self.image)
Example #19
    def run(self):
        if self.dont_load:
            metadata = {}
            self.workflow.exported_image_sequence.append(metadata)
            metadata["path"] = \
                os.path.join(self.workflow.source.workdir, EXPORTED_SQUASHED_IMAGE_NAME)
            # squash the image, don't load it back to docker
            Squash(log=self.log, image=self.image, from_layer=self.from_layer,
                   tag=self.tag, output_path=metadata["path"]).run()
            metadata.update(get_exported_image_metadata(metadata["path"]))
        else:
            # squash the image and load it back to engine
            new_id = Squash(log=self.log, image=self.image, from_layer=self.from_layer,
                            tag=self.tag).run()
            self.workflow.builder.image_id = new_id
        if self.remove_former_image:
            self.tasker.remove_image(self.image)
Example #20
    def run(self):
        if self.load_exported_image:
            if len(self.workflow.exported_image_sequence) == 0:
                raise RuntimeError(
                    'load_exported_image used, but no exported image')
            image = self.workflow.exported_image_sequence[-1].get('path')
            self.log.info('preparing to compress image %s', image)
            with open(image, 'rb') as image_stream:
                outfile = self._compress_image_stream(image_stream)
        else:
            image = self.workflow.image
            self.log.info('fetching image %s from docker', image)
            with self.tasker.d.get_image(image) as image_stream:
                outfile = self._compress_image_stream(image_stream)
        self.workflow.exported_image_sequence.append(
            get_exported_image_metadata(outfile))
        self.log.info('compressed image is available as %s', outfile)
Example #21
    def export_image(self, image_output_dir):
        output_path = os.path.join(tempfile.mkdtemp(),
                                   EXPORTED_SQUASHED_IMAGE_NAME)

        cmd = ['skopeo', 'copy']
        source_img = 'oci:{}'.format(image_output_dir)
        dest_img = 'oci-archive:{}'.format(output_path)
        cmd += [source_img, dest_img]

        self.log.info("Calling: %s", ' '.join(cmd))
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            self.log.error("failed to save oci-archive :\n%s", e.output)
            raise

        img_metadata = get_exported_image_metadata(output_path,
                                                   IMAGE_TYPE_OCI_TAR)
        self.workflow.exported_image_sequence.append(img_metadata)
Example #22
    def build_flatpak_image(self, source,
                            build_dir: BuildDir) -> Dict[str, Any]:
        builder = FlatpakBuilder(source,
                                 build_dir.path,
                                 'var/tmp/flatpak-build',
                                 parse_manifest=parse_rpm_output,
                                 flatpak_metadata=self.flatpak_metadata)

        df_labels = build_dir.dockerfile_with_parent_env(
            self.workflow.imageutil.base_image_inspect()).labels

        builder.add_labels(df_labels)

        tmp_dir = tempfile.mkdtemp(dir=build_dir.path)

        image_filesystem = self.workflow.imageutil.extract_filesystem_layer(
            str(build_dir.exported_squashed_image), str(tmp_dir))

        filesystem_path = os.path.join(tmp_dir, image_filesystem)

        with open(filesystem_path, 'rb') as f:
            # this part is 'not ideal', but this function seems to be a prerequisite
            # for building the flatpak image since it does the setup for it
            flatpak_filesystem, flatpak_manifest = builder._export_from_stream(
                f)

        self.log.info('filesystem tarfile written to %s', flatpak_filesystem)

        image_rpm_components = builder.get_components(flatpak_manifest)

        ref_name, outfile, _ = builder.build_container(flatpak_filesystem)

        metadata = get_exported_image_metadata(outfile, IMAGE_TYPE_OCI)
        metadata['ref_name'] = ref_name

        self.log.info('OCI image is available as %s', outfile)

        shutil.rmtree(tmp_dir)

        return {'metadata': metadata, 'components': image_rpm_components}
Example #23
    def run(self):
        if is_flatpak_build(self.workflow):
            # We'll extract the filesystem anyways for a Flatpak instead of exporting
            # the docker image directly, so squash just slows things down.
            self.log.info('flatpak build, skipping plugin')
            return

        # This plugin is obsolete. This line change is just to keep the tests passing.
        if getattr(self.workflow, "skip_layer_squash", False):
            return  # enable build plugins to prevent unnecessary squashes
        if self.save_archive:
            output_path = os.path.join(self.workflow.source.workdir,
                                       EXPORTED_SQUASHED_IMAGE_NAME)
            metadata = {"path": output_path}
        else:
            output_path = None

        # Squash the image and output tarfile
        # If the parameter dont_load is set to True, the squashed image won't be
        # loaded into the Docker daemon. If it's set to False, it will be loaded.
        new_id = Squash(log=self.log,
                        image=self.image,
                        from_layer=self.from_layer,
                        tag=self.tag,
                        output_path=output_path,
                        load_image=not self.dont_load).run()

        if ':' not in new_id:
            # Older versions of the daemon do not include the prefix
            new_id = 'sha256:{}'.format(new_id)

        if not self.dont_load:
            self.workflow.data.image_id = new_id

        if self.save_archive:
            metadata.update(
                get_exported_image_metadata(output_path,
                                            IMAGE_TYPE_DOCKER_ARCHIVE))
            # OSBS2 TBD exported_image_sequence will not work for multiple platform
            self.workflow.data.exported_image_sequence.append(metadata)
Example #24
    def run(self):
        if self.dont_load:
            metadata = {}
            self.workflow.exported_image_sequence.append(metadata)
            metadata["path"] = \
                os.path.join(self.workflow.source.workdir, EXPORTED_SQUASHED_IMAGE_NAME)
            # squash the image, don't load it back to docker
            Squash(log=self.log,
                   image=self.image,
                   from_layer=self.from_layer,
                   tag=self.tag,
                   output_path=metadata["path"]).run()
            metadata.update(get_exported_image_metadata(metadata["path"]))
        else:
            # squash the image and load it back to engine
            new_id = Squash(log=self.log,
                            image=self.image,
                            from_layer=self.from_layer,
                            tag=self.tag).run()
            self.workflow.builder.image_id = new_id
        if self.remove_former_image:
            self.tasker.remove_image(self.image)
Example #25
    def run(self):
        """
        Build image inside current environment using imagebuilder;
        It's expected this may run within a (privileged) docker container.

        Returns:
            BuildResult
        """
        builder = self.workflow.builder

        image = builder.image.to_str()
        # TODO: directly invoke go imagebuilder library in shared object via python module
        kwargs = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if not PY2:
            kwargs['encoding'] = 'utf-8'
        ib_process = subprocess.Popen(
            ['imagebuilder', '-t', image, builder.df_dir], **kwargs)

        self.log.debug(
            'imagebuilder build has begun; waiting for it to finish')
        (output, last_error) = ([], None)
        while True:
            poll = ib_process.poll()
            out = sixdecode(ib_process.stdout.readline())
            if out:
                self.log.info(out.strip())
                output.append(out)
            err = sixdecode(ib_process.stderr.readline())
            if err:
                self.log.error(err.strip())
                output.append(err)  # include stderr with stdout
                last_error = err  # while noting the final line
            if out == '' and err == '':
                if poll is not None:
                    break
                time.sleep(0.1)  # don't busy-wait when there's no output

        if ib_process.returncode != 0:
            # imagebuilder uses stderr for normal output too; so in the case of an apparent
            # failure, single out the last line to include in the failure summary.
            err = last_error or "<imagebuilder had bad exit code but no error output>"
            return BuildResult(
                logs=output,
                fail_reason="image build failed (rc={}): {}".format(
                    ib_process.returncode, err),
            )

        image_id = builder.get_built_image_info()['Id']
        if ':' not in image_id:
            # Older versions of the daemon do not include the prefix
            image_id = 'sha256:{}'.format(image_id)

        # since we need no squash, export the image for local operations like squash would have
        self.log.info("fetching image %s from docker", image)
        output_path = os.path.join(self.workflow.source.workdir,
                                   EXPORTED_SQUASHED_IMAGE_NAME)
        with open(output_path, "w") as image_file:
            image_file.write(self.tasker.d.get_image(image).data)
        img_metadata = get_exported_image_metadata(output_path,
                                                   IMAGE_TYPE_DOCKER_ARCHIVE)
        self.workflow.exported_image_sequence.append(img_metadata)

        return BuildResult(logs=output,
                           image_id=image_id,
                           skip_layer_squash=True)
Example #26
def test_tag_and_push_plugin_oci(tmpdir, monkeypatch, use_secret, fail_push,
                                 caplog, reactor_config_map):

    # For now, we don't want to require having skopeo and an OCI-supporting
    # registry in the test environment
    if MOCK:
        mock_docker()
    else:
        return

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({
        "provider": "git",
        "uri": "asd"
    }, TEST_IMAGE)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******",
                    "email": "*****@*****.**",
                    "password": "******"
                }
            }
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:b79482f7dcab2a326c1e8c7025a4336d900e99f50db8b35a659fda67b5ebb3c2'
    MEDIA_TYPE = 'application/vnd.oci.image.manifest.v1+json'
    REF_NAME = "app/org.gnome.eog/x86_64/master"

    manifest_json = {
        "schemaVersion": 2,
        "mediaType": "application/vnd.oci.image.manifest.v1+json",
        "config": {
            "mediaType": MEDIA_TYPE,
            "digest": CONFIG_DIGEST,
            "size": 314
        },
        "layers": [{
            "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
            "digest": "sha256:fd2b341d2ff3751ecdee8d8daacaa650d8a1703360c85d4cfc452d6ec32e147f",
            "size": 1863477
        }],
        "annotations": {
            "org.flatpak.commit-metadata.xa.ref":
            "YXBwL29yZy5nbm9tZS5lb2cveDg2XzY0L21hc3RlcgAAcw==",  # noqa
            "org.flatpak.body":
            "Name: org.gnome.eog\nArch: x86_64\nBranch: master\nBuilt with: Flatpak 0.9.7\n",  # noqa
            "org.flatpak.commit-metadata.xa.metadata":
            "W0FwcGxpY2F0aW9uXQpuYW1lPW9yZy5nbm9tZS5lb2cKcnVudGltZT1vcmcuZmVkb3JhcHJvamVjdC5QbGF0Zm9ybS94ODZfNjQvMjYKc2RrPW9yZy5mZWRvcmFwcm9qZWN0LlBsYXRmb3JtL3g4Nl82NC8yNgpjb21tYW5kPWVvZwoKW0NvbnRleHRdCnNoYXJlZD1pcGM7CnNvY2tldHM9eDExO3dheWxhbmQ7c2Vzc2lvbi1idXM7CmZpbGVzeXN0ZW1zPXhkZy1ydW4vZGNvbmY7aG9zdDt+Ly5jb25maWcvZGNvbmY6cm87CgpbU2Vzc2lvbiBCdXMgUG9saWN5XQpjYS5kZXNydC5kY29uZj10YWxrCgpbRW52aXJvbm1lbnRdCkRDT05GX1VTRVJfQ09ORklHX0RJUj0uY29uZmlnL2Rjb25mCgAAcw==",  # noqa
            "org.flatpak.download-size": "1863477",
            "org.flatpak.commit-metadata.xa.download-size": "AAAAAAAdF/IAdA==",
            "org.flatpak.commit-metadata.xa.installed-size":
            "AAAAAABDdgAAdA==",
            "org.flatpak.subject": "Export org.gnome.eog",
            "org.flatpak.installed-size": "4421120",
            "org.flatpak.commit":
            "d7b8789350660724b20643ebb615df466566b6d04682fa32800d3f10116eec54",  # noqa
            "org.flatpak.metadata":
            "[Application]\nname=org.gnome.eog\nruntime=org.fedoraproject.Platform/x86_64/26\nsdk=org.fedoraproject.Platform/x86_64/26\ncommand=eog\n\n[Context]\nshared=ipc;\nsockets=x11;wayland;session-bus;\nfilesystems=xdg-run/dconf;host;~/.config/dconf:ro;\n\n[Session Bus Policy]\nca.desrt.dconf=talk\n\n[Environment]\nDCONF_USER_CONFIG_DIR=.config/dconf\n",  # noqa
            "org.opencontainers.image.ref.name": REF_NAME,
            "org.flatpak.timestamp": "1499376525"
        }
    }

    config_json = {
        "created": "2017-07-06T21:28:45Z",
        "architecture": "arm64",
        "os": "linux",
        "config": {
            "Memory": 0,
            "MemorySwap": 0,
            "CpuShares": 0
        },
        "rootfs": {
            "type":
            "layers",
            "diff_ids": [
                "sha256:4c5160fea65110aa1eb8ca022e2693bb868367c2502855887f21c77247199339"
            ]
        }
    }

    # Add a mock OCI image to exported_image_sequence; this forces the tag_and_push
    # plugin to push with skopeo rather than with 'docker push'

    # Since we are always mocking the push for now, we can get away with a stub image
    oci_dir = os.path.join(str(tmpdir), 'oci-image')
    os.mkdir(oci_dir)
    with open(os.path.join(oci_dir, "index.json"), "w") as f:
        f.write('"Not a real index.json"')
    with open(os.path.join(oci_dir, "oci-layout"), "w") as f:
        f.write('{"imageLayoutVersion": "1.0.0"}')
    os.mkdir(os.path.join(oci_dir, 'blobs'))
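    # For reference, a real OCI layout's index.json is a manifest list shaped like
    # the sketch below (per the OCI image-layout spec); the stub written above is
    # enough here only because the skopeo push is mocked. Digest and size are
    # hypothetical placeholders.
    real_index_sketch = {
        "schemaVersion": 2,
        "manifests": [{
            "mediaType": "application/vnd.oci.image.manifest.v1+json",
            "digest": "sha256:<manifest-digest>",
            "size": 7143,
            "annotations": {"org.opencontainers.image.ref.name": REF_NAME},
        }],
    }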

    metadata = get_exported_image_metadata(oci_dir, IMAGE_TYPE_OCI)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    oci_tarpath = os.path.join(str(tmpdir), 'oci-image.tar')
    with open(oci_tarpath, "wb") as f:
        with tarfile.TarFile(mode="w", fileobj=f) as tf:
            for fname in os.listdir(oci_dir):
                tf.add(os.path.join(oci_dir, fname), fname)

    metadata = get_exported_image_metadata(oci_tarpath, IMAGE_TYPE_OCI_TAR)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    # Mock the subprocess call to skopeo

    def check_check_output(args, **kwargs):
        if fail_push:
            raise subprocess.CalledProcessError(returncode=1,
                                                cmd=args,
                                                output="Failed")
        assert args[0] == 'skopeo'
        if use_secret:
            assert '--dest-creds=user:mypassword' in args
        assert '--dest-tls-verify=false' in args
        assert args[-2] == 'oci:' + oci_dir + ':' + REF_NAME
        assert args[-1] == 'docker://' + LOCALHOST_REGISTRY + '/' + TEST_IMAGE_NAME
        return ''

    (flexmock(subprocess).should_receive("check_output").once().replace_with(
        check_check_output))

    # Mock out the response from the registry once the OCI image is uploaded

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(
        LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_OCI)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    manifest_response = requests.Response()
    (flexmock(manifest_response,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                  'Content-Type': MEDIA_TYPE,
                  'Docker-Content-Digest': DIGEST_OCI
              }))

    manifest_unacceptable_response = requests.Response()
    (flexmock(manifest_unacceptable_response,
              status_code=404,
              json={"errors": [{
                  "code": "MANIFEST_UNKNOWN"
              }]}))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response,
              raise_for_status=lambda: None,
              json=config_json))

    def custom_get(method, url, headers, **kwargs):
        if url == manifest_latest_url:
            if headers['Accept'] == MEDIA_TYPE:
                return manifest_response
            else:
                return manifest_unacceptable_response

        if url == manifest_url:
            return manifest_response

        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()

    (flexmock(
        requests.Session).should_receive('request').replace_with(custom_get))

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': [{
                'url': LOCALHOST_REGISTRY,
                'insecure': True,
                'auth': {'cfg_path': secret_path},
            }]})

    runner = PostBuildPluginsRunner(tasker, workflow,
                                    [{
                                        'name': TagAndPushPlugin.key,
                                        'args': {
                                            'registries': {
                                                LOCALHOST_REGISTRY: {
                                                    'insecure': True,
                                                    'secret': secret_path
                                                }
                                            }
                                        },
                                    }])

    with caplog.at_level(logging.DEBUG):
        if fail_push:
            with pytest.raises(PluginFailedException):
                output = runner.run()
        else:
            output = runner.run()

    for r in caplog.records:
        assert 'mypassword' not in r.getMessage()

    if not fail_push:
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        assert workflow.push_conf.docker_registries[0].digests[TEST_IMAGE_NAME].v1 is None
        assert workflow.push_conf.docker_registries[0].digests[TEST_IMAGE_NAME].v2 is None
        assert workflow.push_conf.docker_registries[0].digests[TEST_IMAGE_NAME].oci == DIGEST_OCI

        assert workflow.push_conf.docker_registries[0].config is config_json
Example #27
def test_tag_and_push_plugin_oci(
        tmpdir, monkeypatch, use_secret, fail_push, caplog, reactor_config_map):

    # For now, we don't want to require having skopeo and an OCI-supporting
    # registry in the test environment
    if MOCK:
        mock_docker()
    else:
        return

    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    setattr(workflow, 'builder', X)

    secret_path = None
    if use_secret:
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "******", "email": "*****@*****.**", "password": "******"}}
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir

    CONFIG_DIGEST = 'sha256:b79482f7dcab2a326c1e8c7025a4336d900e99f50db8b35a659fda67b5ebb3c2'
    MEDIA_TYPE = 'application/vnd.oci.image.manifest.v1+json'
    REF_NAME = "app/org.gnome.eog/x86_64/master"

    manifest_json = {
        "schemaVersion": 2,
        "mediaType": "application/vnd.oci.image.manifest.v1+json",
        "config": {
            "mediaType": MEDIA_TYPE,
            "digest": CONFIG_DIGEST,
            "size": 314
        },
        "layers": [
            {
                "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
                "digest": "sha256:fd2b341d2ff3751ecdee8d8daacaa650d8a1703360c85d4cfc452d6ec32e147f",
                "size": 1863477
            }
        ],
        "annotations": {
            "org.flatpak.commit-metadata.xa.ref": "YXBwL29yZy5nbm9tZS5lb2cveDg2XzY0L21hc3RlcgAAcw==",  # noqa
            "org.flatpak.body": "Name: org.gnome.eog\nArch: x86_64\nBranch: master\nBuilt with: Flatpak 0.9.7\n",  # noqa
            "org.flatpak.commit-metadata.xa.metadata": "W0FwcGxpY2F0aW9uXQpuYW1lPW9yZy5nbm9tZS5lb2cKcnVudGltZT1vcmcuZmVkb3JhcHJvamVjdC5QbGF0Zm9ybS94ODZfNjQvMjYKc2RrPW9yZy5mZWRvcmFwcm9qZWN0LlBsYXRmb3JtL3g4Nl82NC8yNgpjb21tYW5kPWVvZwoKW0NvbnRleHRdCnNoYXJlZD1pcGM7CnNvY2tldHM9eDExO3dheWxhbmQ7c2Vzc2lvbi1idXM7CmZpbGVzeXN0ZW1zPXhkZy1ydW4vZGNvbmY7aG9zdDt+Ly5jb25maWcvZGNvbmY6cm87CgpbU2Vzc2lvbiBCdXMgUG9saWN5XQpjYS5kZXNydC5kY29uZj10YWxrCgpbRW52aXJvbm1lbnRdCkRDT05GX1VTRVJfQ09ORklHX0RJUj0uY29uZmlnL2Rjb25mCgAAcw==",  # noqa
            "org.flatpak.download-size": "1863477",
            "org.flatpak.commit-metadata.xa.download-size": "AAAAAAAdF/IAdA==",
            "org.flatpak.commit-metadata.xa.installed-size": "AAAAAABDdgAAdA==",
            "org.flatpak.subject": "Export org.gnome.eog",
            "org.flatpak.installed-size": "4421120",
            "org.flatpak.commit": "d7b8789350660724b20643ebb615df466566b6d04682fa32800d3f10116eec54",  # noqa
            "org.flatpak.metadata": "[Application]\nname=org.gnome.eog\nruntime=org.fedoraproject.Platform/x86_64/26\nsdk=org.fedoraproject.Platform/x86_64/26\ncommand=eog\n\n[Context]\nshared=ipc;\nsockets=x11;wayland;session-bus;\nfilesystems=xdg-run/dconf;host;~/.config/dconf:ro;\n\n[Session Bus Policy]\nca.desrt.dconf=talk\n\n[Environment]\nDCONF_USER_CONFIG_DIR=.config/dconf\n",  # noqa
            "org.opencontainers.image.ref.name": REF_NAME,
            "org.flatpak.timestamp": "1499376525"
        }
    }

    config_json = {
        "created": "2017-07-06T21:28:45Z",
        "architecture": "arm64",
        "os": "linux",
        "config": {
            "Memory": 0,
            "MemorySwap": 0,
            "CpuShares": 0
        },
        "rootfs": {
            "type": "layers",
            "diff_ids": [
                "sha256:4c5160fea65110aa1eb8ca022e2693bb868367c2502855887f21c77247199339"
            ]
        }
    }

    # Add a mock OCI image to exported_image_sequence; this forces the tag_and_push
    # plugin to push with skopeo rather than with 'docker push'

    # Since we are always mocking the push for now, we can get away with a stub image
    oci_dir = os.path.join(str(tmpdir), 'oci-image')
    os.mkdir(oci_dir)
    with open(os.path.join(oci_dir, "index.json"), "w") as f:
        f.write('"Not a real index.json"')
    with open(os.path.join(oci_dir, "oci-layout"), "w") as f:
        f.write('{"imageLayoutVersion": "1.0.0"}')
    os.mkdir(os.path.join(oci_dir, 'blobs'))

    metadata = get_exported_image_metadata(oci_dir, IMAGE_TYPE_OCI)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    oci_tarpath = os.path.join(str(tmpdir), 'oci-image.tar')
    with open(oci_tarpath, "wb") as f:
        with tarfile.TarFile(mode="w", fileobj=f) as tf:
            for fname in os.listdir(oci_dir):
                tf.add(os.path.join(oci_dir, fname), fname)

    metadata = get_exported_image_metadata(oci_tarpath, IMAGE_TYPE_OCI_TAR)
    metadata['ref_name'] = REF_NAME
    workflow.exported_image_sequence.append(metadata)

    # Mock the subprocess call to skopeo

    def check_check_output(args, **kwargs):
        if fail_push:
            raise subprocess.CalledProcessError(returncode=1, cmd=args, output="Failed")
        assert args[0] == 'skopeo'
        if use_secret:
            assert '--authfile=' + os.path.join(secret_path, '.dockercfg') in args
        assert '--dest-tls-verify=false' in args
        assert args[-2] == 'oci:' + oci_dir + ':' + REF_NAME
        assert args[-1] == 'docker://' + LOCALHOST_REGISTRY + '/' + TEST_IMAGE_NAME
        return ''

    (flexmock(subprocess)
     .should_receive("check_output")
     .once()
     .replace_with(check_check_output))

    # Mock out the response from the registry once the OCI image is uploaded

    manifest_latest_url = "https://{}/v2/{}/manifests/latest".format(LOCALHOST_REGISTRY, TEST_IMAGE)
    manifest_url = "https://{}/v2/{}/manifests/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, DIGEST_OCI)
    config_blob_url = "https://{}/v2/{}/blobs/{}".format(
        LOCALHOST_REGISTRY, TEST_IMAGE, CONFIG_DIGEST)

    manifest_response = requests.Response()
    (flexmock(manifest_response,
              raise_for_status=lambda: None,
              json=manifest_json,
              headers={
                'Content-Type': MEDIA_TYPE,
                'Docker-Content-Digest': DIGEST_OCI
              }))

    manifest_unacceptable_response = requests.Response()
    (flexmock(manifest_unacceptable_response,
              status_code=404,
              json={
                  "errors": [{"code": "MANIFEST_UNKNOWN"}]
              }))

    config_blob_response = requests.Response()
    (flexmock(config_blob_response, raise_for_status=lambda: None, json=config_json))

    def custom_get(method, url, headers, **kwargs):
        if url == manifest_latest_url:
            if headers['Accept'] == MEDIA_TYPE:
                return manifest_response
            else:
                return manifest_unacceptable_response

        if url == manifest_url:
            return manifest_response

        if url == config_blob_url:
            return config_blob_response

    mock_get_retry_session()

    (flexmock(requests.Session)
        .should_receive('request')
        .replace_with(custom_get))

    if reactor_config_map:
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'registries': [{
                'url': LOCALHOST_REGISTRY,
                'insecure': True,
                'auth': {'cfg_path': secret_path},
            }]})

    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagAndPushPlugin.key,
            'args': {
                'registries': {
                    LOCALHOST_REGISTRY: {
                        'insecure': True,
                        'secret': secret_path
                    }
                }
            },
        }]
    )

    with caplog.at_level(logging.DEBUG):
        if fail_push:
            with pytest.raises(PluginFailedException):
                output = runner.run()
        else:
            output = runner.run()

    if not fail_push:
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0

        assert workflow.push_conf.docker_registries[0].digests[TEST_IMAGE_NAME].v1 is None
        assert workflow.push_conf.docker_registries[0].digests[TEST_IMAGE_NAME].v2 is None
        assert workflow.push_conf.docker_registries[0].digests[TEST_IMAGE_NAME].oci == DIGEST_OCI

        assert workflow.push_conf.docker_registries[0].config is config_json
Example #28
    def run(self):
        """
        Build image inside current environment using imagebuilder;
        It's expected this may run within a (privileged) docker container.

        Returns:
            BuildResult
        """
        builder = self.workflow.builder

        image = builder.image.to_str()
        # TODO: directly invoke go imagebuilder library in shared object via python module
        kwargs = dict(stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
        encoding_params = dict(encoding='utf-8', errors='replace')
        if not PY2:
            kwargs.update(encoding_params)

        allow_repo_dir_in_dockerignore(builder.df_dir)
        ib_process = subprocess.Popen(['imagebuilder', '-t', image, builder.df_dir], **kwargs)

        self.log.debug('imagebuilder build has begun; waiting for it to finish')
        output = []
        while True:
            poll = ib_process.poll()
            out = ib_process.stdout.readline()
            out = out.decode(**encoding_params) if PY2 else out
            if out:
                self.log.info('%s', out.rstrip())
                output.append(out)
            elif poll is not None:
                break

        if ib_process.returncode != 0:
            # in the case of an apparent failure, single out the last line to
            # include in the failure summary.
            err = output[-1] if output else "<imagebuilder had bad exit code but no output>"
            return BuildResult(
                logs=output,
                fail_reason="image build failed (rc={}): {}".format(ib_process.returncode, err),
            )

        image_id = builder.get_built_image_info()['Id']
        if ':' not in image_id:
            # Older versions of the daemon do not include the prefix
            image_id = 'sha256:{}'.format(image_id)

        # since we need no squash, export the image for local operations like squash would have
        self.log.info("fetching image %s from docker", image)
        output_path = os.path.join(self.workflow.source.workdir, EXPORTED_SQUASHED_IMAGE_NAME)
        try:
            # docker-py 1.x
            with open(output_path, "w") as image_file:
                image_file.write(self.tasker.d.get_image(image).data)
        except AttributeError:
            # docker-py 3.x
            with open(output_path, "wb") as image_file:
                for chunk in self.tasker.d.get_image(image):
                    image_file.write(chunk)

        img_metadata = get_exported_image_metadata(output_path, IMAGE_TYPE_DOCKER_ARCHIVE)
        self.workflow.exported_image_sequence.append(img_metadata)

        return BuildResult(logs=output, image_id=image_id, skip_layer_squash=True)
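The poll()/readline() loop above is the load-bearing pattern in this plugin:
stream a child process's combined stdout and stderr line by line while it runs,
rather than blocking on communicate(). A minimal standalone sketch of the same
pattern (Python 3 only, with a generic command standing in for imagebuilder):

import subprocess

def stream_command(cmd):
    """Yield the combined stdout/stderr of cmd line by line as it runs."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            universal_newlines=True,
                            encoding='utf-8', errors='replace')
    while True:
        line = proc.stdout.readline()
        if line:
            yield line.rstrip()
        elif proc.poll() is not None:
            break
    if proc.returncode != 0:
        # surface failures the way check_call would
        raise subprocess.CalledProcessError(proc.returncode, cmd)

for line in stream_command(['echo', 'hello from the build']):
    print(line)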
def test_running_build(workflow, caplog,
                       sources_dir, sources_dir_exists, sources_dir_empty,
                       remote_dir, remote_dir_exists, remote_dir_empty,
                       maven_dir, maven_dir_exists, maven_dir_empty,
                       export_failed):
    """
    Test if proper result is returned and if plugin works
    """
    build_sources_dir = workflow.build_dir.source_container_sources_dir
    sources_dir_path = build_sources_dir / sources_dir
    if sources_dir_exists:
        sources_dir_path.mkdir()
        if not sources_dir_empty:
            os.mknod(sources_dir_path / 'stub.srpm')

    remote_dir_path = build_sources_dir / remote_dir
    if remote_dir_exists:
        remote_dir_path.mkdir()
        if not remote_dir_empty:
            os.mknod(remote_dir_path / 'remote-sources-first.tar.gz')
            os.mknod(remote_dir_path / 'remote-sources-second.tar.gz')

    maven_dir_path = build_sources_dir / maven_dir
    if maven_dir_exists:
        maven_dir_path.mkdir()
        if not maven_dir_empty:
            os.mkdir(maven_dir_path / 'maven-sources-1')
            os.mknod(maven_dir_path / 'maven-sources-1' / 'maven-sources-1.tar.gz')

    workflow.build_dir.init_build_dirs(["noarch"], workflow.source)
    workflow.data.prebuild_results[PLUGIN_FETCH_SOURCES_KEY] = {
        'image_sources_dir': str(sources_dir_path),
        'remote_sources_dir': str(remote_dir_path),
        'maven_sources_dir': str(maven_dir_path),
    }

    runner = BuildStepPluginsRunner(
        workflow,
        [{
            'name': SourceContainerPlugin.key,
            'args': {},
        }]
    )

    temp_image_output_dir = workflow.build_dir.source_container_output_dir
    exported_image_file = workflow.build_dir.any_platform.exported_squashed_image
    temp_image_export_dir = exported_image_file.parent
    tempfile_chain = (flexmock(tempfile)
                      .should_receive("mkdtemp")
                      .and_return(str(temp_image_output_dir)))
    tempfile_chain.and_return(str(temp_image_export_dir))
    temp_image_export_dir.mkdir(parents=True, exist_ok=True)
    temp_image_output_dir.joinpath('blobs', 'sha256').mkdir(parents=True, exist_ok=True)
    # temp dir created by bsi
    flexmock(os).should_receive('getcwd').and_return(str(workflow.build_dir.path))
    temp_bsi_dir = workflow.build_dir.path / 'SrcImg'
    temp_bsi_dir.mkdir()

    def check_run_skopeo(args):
        """Mocked call to skopeo"""
        assert args[0] == 'skopeo'
        assert args[1] == 'copy'
        assert args[2] == f'oci:{temp_image_output_dir}'
        assert args[3] == f'docker-archive:{exported_image_file}'

        if export_failed:
            raise subprocess.CalledProcessError(returncode=1, cmd=args, output="Failed")

        return ''

    def check_check_output(args, **kwargs):
        """Mocked check_output call for bsi"""
        args_expect = ['bsi', '-d']
        drivers = set()
        if sources_dir and sources_dir_exists:
            drivers.add('sourcedriver_rpm_dir')
        if remote_dir and remote_dir_exists:
            drivers.add('sourcedriver_extra_src_dir')
        if maven_dir and maven_dir_exists:
            drivers.add('sourcedriver_extra_src_dir')
        args_expect.append(','.join(drivers))

        if sources_dir and sources_dir_exists:
            args_expect.append('-s')
            args_expect.append(str(sources_dir_path))
        if remote_dir and remote_dir_exists:
            for count in range(len(os.listdir(remote_dir_path))):
                args_expect.append('-e')
                args_expect.append(str(remote_dir_path / f"remote_source_{count}"))
        if maven_dir and maven_dir_exists:
            for maven_subdir in os.listdir(maven_dir_path):
                args_expect.append('-e')
                args_expect.append(str(maven_dir_path / maven_subdir))
        args_expect.append('-o')
        args_expect.append(str(temp_image_output_dir))

        assert args == args_expect
        return 'stub stdout'

    any_sources = any([sources_dir_exists, remote_dir_exists, maven_dir_exists])

    (flexmock(retries)
     .should_receive("run_cmd")
     .times(1 if any_sources else 0)
     .replace_with(check_run_skopeo))

    (flexmock(subprocess)
     .should_receive("check_output")
     .times(1 if any_sources else 0)
     .replace_with(check_check_output))

    blob_sha = "f568c411849e21aa3917973f1c5b120f6b52fe69b1944dfb977bc11bed6fbb6d"
    index_json = {
        "schemaVersion": 2,
        "manifests": [{
            "mediaType": "application/vnd.oci.image.manifest.v1+json",
            "digest": f"sha256:{blob_sha}",
            "size": 645,
            "annotations": {"org.opencontainers.image.ref.name": "latest-source"},
            "platform": {"architecture": "amd64", "os": "linux"},
        }],
    }
    blob_json = {"schemaVersion": 2, "layers": []}
    expected_exported_image_metadata = {}

    temp_image_output_dir.joinpath("index.json").write_text(json.dumps(index_json), "utf-8")
    temp_image_output_dir.joinpath("blobs", "sha256", blob_sha).write_text(
        json.dumps(blob_json), "utf-8"
    )

    if not export_failed:
        export_tar = workflow.build_dir.any_platform.exported_squashed_image
        with open(export_tar, "wb") as f:
            with tarfile.TarFile(mode="w", fileobj=f) as tf:
                for entry in os.listdir(temp_image_output_dir):
                    tf.add(str(temp_image_output_dir / entry), entry)
        expected_exported_image_metadata = get_exported_image_metadata(str(export_tar),
                                                                       IMAGE_TYPE_DOCKER_ARCHIVE)

    if not any_sources:
        with pytest.raises(PluginFailedException) as exc_info:
            runner.run()
        err_msg = exc_info.value.args[0]
        assert re.search("No SRPMs directory", err_msg)
        assert re.search("No Remote source directory", err_msg)
        assert re.search("No Maven source directory", err_msg)

        err_msg = f"No SRPMs directory '{sources_dir_path}' available"
        err_msg += f"\nNo Remote source directory '{remote_dir_path}' available"
        err_msg += f"\nNo Maven source directory '{maven_dir_path}' available"
        # Since Python 3.7 logger adds additional whitespaces by default -> checking without them
        assert re.sub(r'\s+', " ", err_msg) in re.sub(r'\s+', " ", caplog.text)
        assert workflow.build_process_failed

    elif export_failed:
        with pytest.raises(PluginFailedException):
            runner.run()
    else:
        results = runner.run()
        assert results.keys() == {'image_metadata', 'logs', 'skip_layer_squash'}
        assert results['logs'] == ['stub stdout']
        assert results['skip_layer_squash']
        assert results['image_metadata'] == expected_exported_image_metadata
        assert 'stub stdout' in caplog.text
        empty_srpm_msg = f"SRPMs directory '{sources_dir_path}' is empty"
        empty_remote_msg = f"Remote source directory '{remote_dir_path}' is empty"
        empty_maven_msg = f"Maven source directory '{maven_dir_path}' is empty"
        if sources_dir_exists and sources_dir_empty:
            assert empty_srpm_msg in caplog.text
        else:
            assert empty_srpm_msg not in caplog.text
        if remote_dir_exists and remote_dir_empty:
            assert empty_remote_msg in caplog.text
        else:
            assert empty_remote_msg not in caplog.text
        if maven_dir_exists and maven_dir_empty:
            assert empty_maven_msg in caplog.text
        else:
            assert empty_maven_msg not in caplog.text

        remove_srpm_msg = f"Will remove directory with downloaded srpms: {sources_dir_path}"
        remove_remote_msg = f"Will remove directory with downloaded remote sources: " \
                            f"{remote_dir_path}"
        remove_maven_msg = f"Will remove directory with downloaded maven sources: " \
                           f"{maven_dir_path}"
        if sources_dir_exists:
            assert remove_srpm_msg in caplog.text
        else:
            assert remove_srpm_msg not in caplog.text
        if remote_dir_exists:
            assert remove_remote_msg in caplog.text
        else:
            assert remove_remote_msg not in caplog.text
        if maven_dir_exists:
            assert remove_maven_msg in caplog.text
        else:
            assert remove_maven_msg not in caplog.text

        remove_unpacked_msg = f"Will remove unpacked image directory: {temp_image_output_dir}"
        assert remove_unpacked_msg in caplog.text

        remove_tmpbsi_msg = f"Will remove BSI temporary directory: {temp_bsi_dir}"
        assert remove_tmpbsi_msg in caplog.text
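The flag arguments this test consumes (sources_dir_exists, remote_dir_empty,
export_failed, and so on) are evidently supplied by a pytest parametrization
that sits above the function and is not part of this excerpt. A purely
hypothetical sketch of what such a decorator stack could look like; the
directory names and truth table below are illustrative assumptions, not the
original test matrix:

import pytest

# hypothetical parameter matrix; the real one is not shown in this excerpt
@pytest.mark.parametrize('sources_dir, sources_dir_exists, sources_dir_empty', [
    ('image_sources', True, False),
    ('image_sources', True, True),
    ('image_sources', False, False),
])
@pytest.mark.parametrize('remote_dir, remote_dir_exists, remote_dir_empty', [
    ('remote_sources', True, False),
    ('remote_sources', False, False),
])
@pytest.mark.parametrize('maven_dir, maven_dir_exists, maven_dir_empty', [
    ('maven_sources', True, False),
    ('maven_sources', False, False),
])
@pytest.mark.parametrize('export_failed', [False, True])
def test_running_build(sources_dir, sources_dir_exists, sources_dir_empty,
                       remote_dir, remote_dir_exists, remote_dir_empty,
                       maven_dir, maven_dir_exists, maven_dir_empty,
                       export_failed):
    ...  # body elided; the full test appears above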
예제 #30
0
    def run(self):
        """
        Build the image inside the current environment using imagebuilder;
        it's expected this may run within a (privileged) docker container.

        Returns:
            BuildResult
        """
        builder = self.workflow.builder

        image = builder.image.to_str()

        allow_repo_dir_in_dockerignore(builder.df_dir)

        process_args = ['imagebuilder', '-t', image]
        for buildarg, buildargval in builder.buildargs.items():
            process_args.append('--build-arg')
            process_args.append('%s=%s' % (buildarg, buildargval))
        process_args.append(builder.df_dir)

        ib_process = subprocess.Popen(process_args,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT,
                                      universal_newlines=True,
                                      encoding='utf-8',
                                      errors='replace')

        self.log.debug('imagebuilder build has begun; waiting for it to finish')
        self.log.debug('%s', process_args)
        output = []
        while True:
            poll = ib_process.poll()
            out = ib_process.stdout.readline()
            if out:
                self.log.info('%s', out.rstrip())
                output.append(out)
            elif poll is not None:
                break

        if ib_process.returncode != 0:
            # in the case of an apparent failure, single out the last line to
            # include in the failure summary.
            err = output[-1] if output else "<imagebuilder had bad exit code but no output>"
            return BuildResult(
                logs=output,
                fail_reason="image build failed (rc={}): {}".format(
                    ib_process.returncode, err),
            )

        image_id = builder.get_built_image_info()['Id']
        if ':' not in image_id:
            # Older versions of the daemon do not include the prefix
            image_id = 'sha256:{}'.format(image_id)

        # since we skip squashing, export the image for local operations,
        # as the squash step otherwise would have
        self.log.info("fetching image %s from docker", image)
        output_path = os.path.join(self.workflow.source.workdir,
                                   EXPORTED_SQUASHED_IMAGE_NAME)
        try:
            # docker-py 1.x
            with open(output_path, "w") as image_file:
                image_file.write(self.tasker.get_image(image).data)
        except AttributeError:
            # docker-py 3.x
            with open(output_path, "wb") as image_file:
                for chunk in self.tasker.get_image(image):
                    image_file.write(chunk)

        img_metadata = get_exported_image_metadata(output_path,
                                                   IMAGE_TYPE_DOCKER_ARCHIVE)
        self.workflow.exported_image_sequence.append(img_metadata)

        return BuildResult(logs=output,
                           image_id=image_id,
                           skip_layer_squash=True)
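Both versions of run() finish with the same try/except AttributeError shim for
exporting the built image across docker-py generations: 1.x's get_image()
returns a response object whose .data attribute holds the whole archive, while
3.x returns an iterable of byte chunks. A minimal sketch of that shim as a
standalone helper (save_image_archive and its client parameter are our naming,
not the project's API):

def save_image_archive(client, image, output_path):
    """Write a `docker save`-style archive of image to output_path,
    tolerating both docker-py 1.x and 3.x return types of get_image()."""
    try:
        # docker-py 1.x: a response object exposing the archive via .data
        data = client.get_image(image).data
    except AttributeError:
        # docker-py 3.x: get_image() returns an iterable of byte chunks
        with open(output_path, "wb") as image_file:
            for chunk in client.get_image(image):
                image_file.write(chunk)
    else:
        with open(output_path, "wb") as image_file:
            image_file.write(data)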