def mock_environment(tmpdir,
                     session=None,
                     build_process_failed=False,
                     koji_build_id=None,
                     use_import=False):
    if session is None:
        session = MockedClientSession('')

    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, 'test-image')
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)

    flexmock(koji, ClientSession=lambda hub, opts: session)

    if build_process_failed:
        workflow.build_result = BuildResult(fail_reason="not built")
    else:
        workflow.build_result = BuildResult(image_id="id1234")

    if koji_build_id:
        if use_import:
            workflow.exit_results[KojiImportPlugin.key] = koji_build_id
        else:
            workflow.exit_results[KojiPromotePlugin.key] = koji_build_id

    (flexmock(time).should_receive('sleep').and_return(None))

    return tasker, workflow
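The helper above stubs koji.ClientSession and time.sleep via flexmock. A minimal, self-contained sketch of the should_receive stubbing pattern, with a made-up greet() function for illustration:

import time

from flexmock import flexmock


def greet():
    time.sleep(5)  # would normally block for five seconds
    return 'hello'


flexmock(time).should_receive('sleep').and_return(None)  # stub out the sleep
assert greet() == 'hello'  # returns immediately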
    def run(self):
        """
        Build image inside the current environment;
        it's expected this may run within a (privileged) docker container.

        Input:
            df_dir
            image

        Output:
            BuildResult
            built_image_info
            image_id
        """
        builder = self.workflow.builder

        logs_gen = self.tasker.build_image_from_path(builder.df_dir,
                                                     builder.image)

        self.log.debug('build is submitted, waiting for it to finish')
        command_result = wait_for_command(logs_gen)

        if command_result.is_failed():
            return BuildResult(logs=command_result.logs,
                               fail_reason=command_result.error_detail)
        else:
            image_id = builder.get_built_image_info()['Id']
            return BuildResult(logs=command_result.logs, image_id=image_id)
Example #3
    def run(self):
        """
        Build image inside the current environment using buildah;
        it's expected this may run within a (privileged) OCI container.

        Returns:
            BuildResult
        """
        builder = self.workflow.builder

        image = builder.image.to_str()
        kwargs = dict(stdout=subprocess.PIPE,
                      stderr=subprocess.STDOUT,
                      universal_newlines=True)
        encoding_params = dict(encoding='utf-8', errors='replace')
        kwargs.update(encoding_params)
        ib_process = subprocess.Popen(
            ['buildah', 'bud', '-t', image, builder.df_dir], **kwargs)

        self.log.debug('buildah build has begun; waiting for it to finish')
        output = []
        while True:
            poll = ib_process.poll()
            out = ib_process.stdout.readline()
            if out:
                self.log.info('%s', out.rstrip())
                output.append(out)
            elif poll is not None:
                break

        if ib_process.returncode != 0:
            # in the case of an apparent failure, single out the last line to
            # include in the failure summary.
            err = output[-1] if output else "<buildah had bad exit code but no output>"
            return BuildResult(
                logs=output,
                fail_reason="image build failed (rc={}): {}".format(
                    ib_process.returncode, err),
            )

        image_id = builder.get_built_image_info()['Id']
        if ':' not in image_id:
            # Older versions of the daemon do not include the prefix
            image_id = 'sha256:{}'.format(image_id)

        # since we skip squashing, export the image ourselves for local
        # operations, as the squash step would otherwise have done
        self.log.info("fetching image %s from docker", image)
        output_path = os.path.join(self.workflow.source.workdir,
                                   EXPORTED_SQUASHED_IMAGE_NAME)
        with open(output_path, "w") as image_file:
            image_file.write(self.tasker.get_image(image).data)
        img_metadata = get_exported_image_metadata(output_path,
                                                   IMAGE_TYPE_DOCKER_ARCHIVE)
        self.workflow.exported_image_sequence.append(img_metadata)

        return BuildResult(logs=output,
                           image_id=image_id,
                           skip_layer_squash=True)
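The poll()/readline() loop above is a common way to stream a child process's log output line by line while it runs. A self-contained sketch of the same pattern, assuming nothing beyond the standard library (the echo command is only an illustration):

import subprocess


def stream_command(cmd):
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            universal_newlines=True)
    output = []
    while True:
        line = proc.stdout.readline()
        if line:
            print(line.rstrip())  # relay each line as it arrives
            output.append(line)
        elif proc.poll() is not None:
            break  # stream drained and process finished
    return proc.returncode, output


rc, logs = stream_command(['echo', 'hello'])
assert rc == 0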
    def test_compress(self, tmpdir, caplog, source_build, method,
                      load_exported_image, give_export, extension):
        if MOCK:
            mock_docker()

        tasker = DockerTasker()
        workflow = DockerBuildWorkflow('test-image',
                                       source={
                                           'provider': 'git',
                                           'uri': 'asd'
                                       })
        workflow.builder = X()
        exp_img = os.path.join(str(tmpdir), 'img.tar')

        if source_build:
            workflow.build_result = BuildResult(oci_image_path="oci_path")
        else:
            workflow.build_result = BuildResult(image_id="12345")

        if load_exported_image and give_export:
            tarfile.open(exp_img, mode='w').close()
            workflow.exported_image_sequence.append({
                'path': exp_img,
                'type': IMAGE_TYPE_DOCKER_ARCHIVE,
            })
            tasker = None  # image provided, should not query docker

        runner = PostBuildPluginsRunner(
            tasker, workflow, [{
                'name': CompressPlugin.key,
                'args': {
                    'method': method,
                    'load_exported_image': load_exported_image,
                },
            }])

        if not extension:
            with pytest.raises(Exception) as excinfo:
                runner.run()
            assert 'Unsupported compression format' in str(excinfo.value)
            return

        runner.run()

        if source_build and not (give_export and load_exported_image):
            assert 'skipping, no exported source image to compress' in caplog.text
        else:
            compressed_img = os.path.join(
                workflow.source.tmpdir,
                EXPORTED_COMPRESSED_IMAGE_NAME_TEMPLATE.format(extension))
            assert os.path.exists(compressed_img)
            metadata = workflow.exported_image_sequence[-1]
            assert metadata['path'] == compressed_img
            assert metadata['type'] == IMAGE_TYPE_DOCKER_ARCHIVE
            assert 'uncompressed_size' in metadata
            assert isinstance(metadata['uncompressed_size'], integer_types)
            assert ", ratio: " in caplog.text
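For context, a hedged sketch of what a compression step like this conceptually does, with gzip standing in for the plugin's configurable method (all names here are illustrative, not the plugin's actual code):

import gzip
import os
import shutil


def compress_file(path):
    out = path + '.gz'
    with open(path, 'rb') as src, gzip.open(out, 'wb') as dst:
        shutil.copyfileobj(src, dst)
    ratio = os.path.getsize(path) / float(os.path.getsize(out))
    print("compressed to %s, ratio: %.2f" % (out, ratio))
    return out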
    def run(self):
        release = self.get_release()
        platforms = self.get_platforms()
        koji_upload_dir = self.get_koji_upload_dir()
        task_id = self.get_fs_task_id()

        thread_pool = ThreadPool(len(platforms))
        result = thread_pool.map_async(
            lambda cluster_info: self.do_worker_build(release, cluster_info,
                                                      koji_upload_dir, task_id),
            [self.choose_cluster(platform) for platform in platforms]
        )

        try:
            while not result.ready():
                # The wait call is a blocking call which prevents signals
                # from being processed. Wait for short intervals instead
                # of a single long interval, so build cancellation can
                # be handled virtually immediately.
                result.wait(1)
        # Always clean up worker builds on any error to avoid
        # runaway worker builds (includes orchestrator build cancellation)
        except Exception:
            self.log.info('build cancelled, cancelling worker builds')
            if self.worker_builds:
                ThreadPool(len(self.worker_builds)).map(
                    lambda bi: bi.cancel_build(), self.worker_builds)
            while not result.ready():
                result.wait(1)
            raise

        annotations = {'worker-builds': {
            build_info.platform: build_info.get_annotations()
            for build_info in self.worker_builds if build_info.build
        }}

        self._apply_repositories(annotations)

        labels = self._make_labels()

        fail_reasons = {
            build_info.platform: build_info.get_fail_reason()
            for build_info in self.worker_builds
            if not build_info.build or not build_info.build.is_succeeded()
        }

        self.workflow.plugin_workspace[self.key] = {
            WORKSPACE_KEY_UPLOAD_DIR: koji_upload_dir,
            WORKSPACE_KEY_BUILD_INFO: {build_info.platform: build_info
                                       for build_info in self.worker_builds},
        }

        if fail_reasons:
            return BuildResult(fail_reason=json.dumps(fail_reasons),
                               annotations=annotations, labels=labels)

        return BuildResult.make_remote_image_result(annotations, labels=labels)
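The short result.wait(1) intervals matter because one long blocking wait would delay signal handling. A self-contained sketch of the same map_async pattern with a made-up worker function:

import time
from multiprocessing.pool import ThreadPool


def work(seconds):
    time.sleep(seconds)
    return seconds


pool = ThreadPool(2)
result = pool.map_async(work, [1, 2])
try:
    while not result.ready():
        # wait in short intervals so signals (e.g. a cancellation
        # request) are processed promptly
        result.wait(1)
    print(result.get())
finally:
    pool.close()
    pool.join()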
    def run(self):
        if not self.platforms:
            raise RuntimeError("No enabled platform to build on")
        self.set_build_image()

        thread_pool = ThreadPool(len(self.platforms))
        result = thread_pool.map_async(self.select_and_start_cluster,
                                       self.platforms)

        try:
            result.get()
        # Always clean up worker builds on any error to avoid
        # runaway worker builds (includes orchestrator build cancellation)
        except Exception:
            thread_pool.terminate()
            self.log.info('build cancelled, cancelling worker builds')
            if self.worker_builds:
                ThreadPool(len(self.worker_builds)).map(
                    lambda bi: bi.cancel_build(), self.worker_builds)
            while not result.ready():
                result.wait(1)
            raise
        else:
            thread_pool.close()
            thread_pool.join()

        annotations = {
            'worker-builds': {
                build_info.platform: build_info.get_annotations()
                for build_info in self.worker_builds if build_info.build
            }
        }

        self._apply_repositories(annotations)

        labels = self._make_labels()

        fail_reasons = {
            build_info.platform: build_info.get_fail_reason()
            for build_info in self.worker_builds
            if not build_info.build or not build_info.build.is_succeeded()
        }

        workspace = self.workflow.plugin_workspace.setdefault(self.key, {})
        workspace[WORKSPACE_KEY_UPLOAD_DIR] = self.koji_upload_dir
        workspace[WORKSPACE_KEY_BUILD_INFO] = {
            build_info.platform: build_info
            for build_info in self.worker_builds
        }

        if fail_reasons:
            return BuildResult(fail_reason=json.dumps(fail_reasons),
                               annotations=annotations,
                               labels=labels)

        return BuildResult.make_remote_image_result(annotations, labels=labels)
def mock_environment(tmpdir, primary_images=None, worker_annotations=None):
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    base_image_id = '123456parent-id'
    setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', TagConf())
    if primary_images:
        workflow.tag_conf.add_primary_images(primary_images)
        workflow.tag_conf.add_unique_image(primary_images[0])

    annotations = deepcopy(BUILD_ANNOTATIONS)
    if not worker_annotations:
        worker_annotations = {'ppc64le': PPC_ANNOTATIONS}
    for worker in worker_annotations:
        annotations['worker-builds'][worker] = deepcopy(
            worker_annotations[worker])

    workflow.build_result = BuildResult(image_id='123456',
                                        annotations=annotations)

    return tasker, workflow
Example #8
def test_get_primary_images(tag_conf, tag_annotation, expected):
    template_image = ImageName.parse('registry.example.com/fedora')
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')

    for tag in tag_conf:
        image_name = ImageName.parse(str(template_image))
        image_name.tag = tag
        workflow.tag_conf.add_primary_image(str(image_name))

    annotations = {}
    for tag in tag_annotation:
        annotations.setdefault('repositories', {}).setdefault('primary', [])
        image_name = ImageName.parse(str(template_image))
        image_name.tag = tag

        annotations['repositories']['primary'].append(str(image_name))

    build_result = BuildResult(annotations=annotations, image_id='foo')
    workflow.build_result = build_result

    actual = get_primary_images(workflow)
    assert len(actual) == len(expected)
    for index, primary_image in enumerate(actual):
        assert primary_image.registry == template_image.registry
        assert primary_image.namespace == template_image.namespace
        assert primary_image.repo == template_image.repo

        assert primary_image.tag == expected[index]
Example #9
def mock_environment(tmpdir, primary_images=None,
                     annotations=None):
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(source=SOURCE)
    base_image_id = '123456parent-id'
    setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
    setattr(workflow, 'builder', StubInsideBuilder())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', StubInsideBuilder())
    setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    setattr(workflow, 'tag_conf', TagConf())
    if primary_images:
        for image in primary_images:
            if '-' in ImageName.parse(image).tag:
                workflow.tag_conf.add_primary_image(image)
        workflow.tag_conf.add_unique_image(primary_images[0])

    workflow.tag_conf.add_floating_image('namespace/httpd:floating')
    workflow.build_result = BuildResult(image_id='123456', annotations=annotations or {})

    return tasker, workflow
Example #10
def test_pulp_publish_delete(worker_builds_created, v1_image_ids, expected,
                             caplog, reactor_config_map):
    tasker, workflow = prepare(success=False, v1_image_ids=v1_image_ids)
    if not worker_builds_created:
        workflow.build_result = BuildResult(fail_reason="not built")

    if reactor_config_map:
        pulp_map = {'name': 'pulp_registry_name', 'auth': {}}
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'pulp': pulp_map})

    plugin = PulpPublishPlugin(tasker, workflow, 'pulp_registry_name')
    msg = "removing ppc64le_v1_image_id from"

    (flexmock(dockpulp.Pulp).should_receive('crane').never())
    if expected:
        (flexmock(dockpulp.Pulp).should_receive('remove').with_args(
            unicode, unicode))
    else:
        (flexmock(dockpulp.Pulp).should_receive('remove').never())

    crane_images = plugin.run()

    assert crane_images == []
    if expected and worker_builds_created:
        assert msg in caplog.text()
    else:
        assert msg not in caplog.text()
Example #11
def mock_environment(tmpdir, workflow, primary_images=None, floating_images=None,
                     manifest_results=None, annotations=None):
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    base_image_id = '123456parent-id'
    setattr(workflow, '_base_image_inspect', {'Id': base_image_id})
    setattr(workflow, 'builder', StubInsideBuilder())
    setattr(workflow.builder, 'image_id', '123456imageid')
    setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='22'))
    setattr(workflow.builder, 'source', StubInsideBuilder())
    setattr(workflow.builder, 'built_image_info', {'ParentId': base_image_id})
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)
    if primary_images:
        for image in primary_images:
            if '-' in ImageName.parse(image).tag:
                workflow.tag_conf.add_primary_image(image)
        workflow.tag_conf.add_unique_image(primary_images[0])

    if floating_images:
        workflow.tag_conf.add_floating_images(floating_images)

    workflow.build_result = BuildResult(image_id='123456', annotations=annotations or {})
    workflow.postbuild_results = {}
    if manifest_results:
        workflow.postbuild_results[PLUGIN_GROUP_MANIFESTS_KEY] = manifest_results

    return tasker, workflow
def prepare(insecure_registry=None, namespace=None, primary_images_tag_conf=DEFAULT_TAGS_AMOUNT,
            primary_images_annotations=DEFAULT_TAGS_AMOUNT):
    """
    Boiler-plate test set-up
    """
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', 'asd123')
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)

    version_release_primary_image = 'registry.example.com/fedora:version-release'

    annotations = None
    if primary_images_annotations:
        primary_images = [
            'registry.example.com/fedora:annotation_{}'.format(x)
            for x in range(primary_images_annotations)
        ]
        primary_images.append(version_release_primary_image)
        annotations = {'repositories': {'primary': primary_images}}
    build_result = BuildResult(annotations=annotations, image_id='foo')
    setattr(workflow, 'build_result', build_result)

    if primary_images_tag_conf:
        primary_images = [
            'registry.example.com/fedora:tag_conf_{}'.format(x)
            for x in range(primary_images_tag_conf)
        ]
        primary_images.append(version_release_primary_image)
        workflow.tag_conf.add_primary_images(primary_images)

    fake_conf = osbs.conf.Configuration(conf_file=None, openshift_url='/')

    expectation = flexmock(osbs.conf).should_receive('Configuration').and_return(fake_conf)
    if namespace:
        expectation.with_args(conf_file=None, namespace=namespace,
                              verify_ssl=False, openshift_url="",
                              use_auth=False, build_json_dir="")

    runner = PostBuildPluginsRunner(tasker, workflow, [{
        'name': ImportImagePlugin.key,
        'args': {
            'imagestream': TEST_IMAGESTREAM,
            'docker_image_repo': TEST_REPO,
            'url': '',
            'build_json_dir': "",
            'verify_ssl': False,
            'use_auth': False,
            'insecure_registry': insecure_registry,
        }}])

    return runner
Example #13
def prepare(insecure_registry=None,
            retry_delay=0,
            import_attempts=3,
            namespace=None):
    """
    Boiler-plate test set-up
    """
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    setattr(workflow, 'builder', X())
    setattr(workflow.builder, 'image_id', 'asd123')
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)

    annotations = {
        'repositories': {
            'primary': ['one', 'two', 'three', 'four', 'five', 'six']
        }
    }
    build_result = BuildResult(annotations=annotations, image_id='foo')
    setattr(workflow, 'build_result', build_result)

    fake_conf = osbs.conf.Configuration(conf_file=None, openshift_uri='/')

    expectation = flexmock(
        osbs.conf).should_receive('Configuration').and_return(fake_conf)
    if namespace:
        expectation.with_args(conf_file=None,
                              namespace=namespace,
                              verify_ssl=False,
                              openshift_url="",
                              openshift_uri="",
                              use_auth=False,
                              build_json_dir="")

    runner = PostBuildPluginsRunner(
        tasker, workflow, [{
            'name': ImportImagePlugin.key,
            'args': {
                'imagestream': TEST_IMAGESTREAM,
                'docker_image_repo': TEST_REPO,
                'url': '',
                'build_json_dir': "",
                'verify_ssl': False,
                'use_auth': False,
                'insecure_registry': insecure_registry,
                'retry_delay': retry_delay,
                'import_attempts': import_attempts,
            }
        }])

    return runner
def test_tag_parse(tmpdir, docker_tasker, floating_tags, unique_tags,
                   primary_tags, expected):
    df = df_parser(str(tmpdir))
    df.content = DF_CONTENT_LABELS

    workflow = mock_workflow(tmpdir)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)
    workflow.build_result = BuildResult.make_remote_image_result()

    base_inspect = {
        INSPECT_CONFIG: {
            'Labels': {
                'parentrelease': '7.4.1'
            },
            'Env': {
                'parentrelease': '7.4.1'
            },
        }
    }
    setattr(workflow.builder, 'base_image_inspect', base_inspect)
    mock_additional_tags_file(str(tmpdir), ['get_tags', 'file_tags'])

    if unique_tags is not None and primary_tags is not None and floating_tags is not None:
        input_tags = {
            'unique': unique_tags,
            'primary': primary_tags,
            'floating': floating_tags,
        }
    else:
        input_tags = None
    runner = PostBuildPluginsRunner(docker_tasker, workflow,
                                    [{
                                        'name': TagFromConfigPlugin.key,
                                        'args': {
                                            'tag_suffixes': input_tags
                                        }
                                    }])
    if expected is not None:
        results = runner.run()
        plugin_result = results[TagFromConfigPlugin.key]

        # Plugin should return the tags we expect
        assert plugin_result == expected

        # Workflow should have the expected tags configured
        for tag in expected:
            assert any(tag == str(image) for image in workflow.tag_conf.images)

        # Workflow should not have any other tags configured
        assert len(workflow.tag_conf.images) == len(expected)
    else:
        with pytest.raises(PluginFailedException):
            runner.run()
Example #15
    def run(self):
        """Build image inside current environment.

        Returns:
            BuildResult
        """
        fetch_sources_result = self.workflow.prebuild_results.get(
            PLUGIN_FETCH_SOURCES_KEY, {})
        source_data_dir = fetch_sources_result.get('image_sources_dir')
        if not source_data_dir or not os.path.isdir(source_data_dir):
            err_msg = "No SRPMs directory '{}' available".format(
                source_data_dir)
            self.log.error(err_msg)
            return BuildResult(logs=err_msg, fail_reason=err_msg)

        if not os.listdir(source_data_dir):
            self.log.warning("SRPMs directory '%s' is empty", source_data_dir)

        image_output_dir = tempfile.mkdtemp()

        cmd = [
            'bsi', '-d', 'sourcedriver_rpm_dir', '-s',
            '{}'.format(source_data_dir), '-o', '{}'.format(image_output_dir)
        ]

        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            self.log.error("BSI failed with output:\n%s", e.output)
            return BuildResult(
                logs=e.output,
                fail_reason='BSI utility failed to build source image')

        self.log.debug("Build log:\n%s\n", output)

        self.export_image(image_output_dir)

        return BuildResult(logs=output,
                           oci_image_path=image_output_dir,
                           skip_layer_squash=True)
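A minimal sketch of the check_output error handling used above, assuming only the standard library (the command passed in is illustrative):

import subprocess


def run_tool(cmd):
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print("command failed (rc=%d):\n%s" % (e.returncode, e.output))
        raise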
def test_tags_enclosed(tmpdir, docker_tasker, name, organization, expected):
    df = df_parser(str(tmpdir))
    df.content = dedent("""\
        FROM fedora
        LABEL "name"="{}"
        LABEL "version"="1.7"
        LABEL "release"="99"
    """.format(name))

    workflow = mock_workflow(tmpdir)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)
    workflow.build_result = BuildResult.make_remote_image_result()

    if organization:
        reactor_config = ReactorConfig({
            'version': 1,
            'registries_organization': organization
        })
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {
            WORKSPACE_CONF_KEY: reactor_config
        }

    input_tags = {
        'unique': ['foo', 'bar'],
        'primary': ['{version}', '{version}-{release}'],
    }

    runner = PostBuildPluginsRunner(docker_tasker, workflow,
                                    [{
                                        'name': TagFromConfigPlugin.key,
                                        'args': {
                                            'tag_suffixes': input_tags
                                        }
                                    }])

    results = runner.run()
    plugin_result = results[TagFromConfigPlugin.key]

    expected_tags = [
        '{}:{}'.format(expected, tag)
        for tag in ['foo', 'bar', '1.7', '1.7-99']
    ]
    # Plugin should return the tags we expect
    assert plugin_result == expected_tags

    # Workflow should have the expected tags configured
    for tag in expected_tags:
        assert any(tag == str(image) for image in workflow.tag_conf.images)

    # Workflow should not have any other tags configured
    assert len(workflow.tag_conf.images) == len(expected_tags)
    def run(self):
        if not self.platforms:
            raise RuntimeError("No enabled platform to build on")
        self.set_build_image()

        thread_pool = ThreadPool(len(self.platforms))
        result = thread_pool.map_async(self.select_and_start_cluster, self.platforms)

        try:
            result.get()
        # Always clean up worker builds on any error to avoid
        # runaway worker builds (includes orchestrator build cancellation)
        except Exception:
            thread_pool.terminate()
            self.log.info('build cancelled, cancelling worker builds')
            if self.worker_builds:
                ThreadPool(len(self.worker_builds)).map(
                    lambda bi: bi.cancel_build(), self.worker_builds)
            while not result.ready():
                result.wait(1)
            raise
        else:
            thread_pool.close()
            thread_pool.join()

        annotations = {'worker-builds': {
            build_info.platform: build_info.get_annotations()
            for build_info in self.worker_builds if build_info.build
        }}

        self._apply_repositories(annotations)

        labels = self._make_labels()

        fail_reasons = {
            build_info.platform: build_info.get_fail_reason()
            for build_info in self.worker_builds
            if not build_info.build or not build_info.build.is_succeeded()
        }

        workspace = self.workflow.plugin_workspace.setdefault(self.key, {})
        workspace[WORKSPACE_KEY_UPLOAD_DIR] = self.koji_upload_dir
        workspace[WORKSPACE_KEY_BUILD_INFO] = {build_info.platform: build_info
                                               for build_info in self.worker_builds}

        if fail_reasons:
            return BuildResult(fail_reason=json.dumps(fail_reasons),
                               annotations=annotations, labels=labels)

        return BuildResult.make_remote_image_result(annotations, labels=labels)
    def run(self):
        """
        Build image inside the current environment;
        it's expected this may run within a (privileged) docker container.

        Input:
            df_dir
            image

        Output:
            BuildResult
            built_image_info
            image_id
        """
        builder = self.workflow.builder

        allow_repo_dir_in_dockerignore(builder.df_dir)
        logs_gen = self.tasker.build_image_from_path(
            builder.df_dir, builder.image, buildargs=builder.buildargs)

        self.log.debug('build is submitted, waiting for it to finish')
        try:
            command_result = wait_for_command(logs_gen)
        except docker.errors.APIError as ex:
            return BuildResult(logs=[], fail_reason=ex.explanation)

        if command_result.is_failed():
            return BuildResult(logs=command_result.logs,
                               fail_reason=command_result.error)
        else:
            image_id = builder.get_built_image_info()['Id']
            if ':' not in image_id:
                # Older versions of the daemon do not include the prefix
                image_id = 'sha256:{}'.format(image_id)

            return BuildResult(logs=command_result.logs, image_id=image_id)
Example #19
def test_build_result():
    with pytest.raises(AssertionError):
        BuildResult(fail_reason='it happens', image_id='spam')

    with pytest.raises(AssertionError):
        BuildResult(fail_reason='', image_id='spam')

    assert BuildResult(fail_reason='it happens').is_failed()
    assert not BuildResult(image_id='spam').is_failed()

    assert BuildResult(image_id='spam', logs=list('logs')).logs == list('logs')

    assert BuildResult(fail_reason='it happens').fail_reason == 'it happens'
    assert BuildResult(image_id='spam').image_id == 'spam'

    assert BuildResult(image_id='spam', annotations={'ham': 'mah'}).annotations == {'ham': 'mah'}

    assert BuildResult(image_id='spam', labels={'ham': 'mah'}).labels == {'ham': 'mah'}

    assert BuildResult(image_id='spam').is_image_available()
    assert not BuildResult(fail_reason='it happens').is_image_available()
    assert not BuildResult.make_remote_image_result().is_image_available()

    assert not BuildResult.make_remote_image_result().is_failed()
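The invariants this test exercises can be summarized with a toy class; this is an illustration of the contract, not atomic-reactor's actual BuildResult:

class ToyBuildResult(object):
    REMOTE_IMAGE = object()  # sentinel: image exists only remotely

    def __init__(self, image_id=None, fail_reason=None):
        # a given fail_reason must be non-empty, and success/failure
        # are mutually exclusive
        assert fail_reason is None or fail_reason
        assert not (fail_reason and image_id)
        self.image_id = image_id
        self.fail_reason = fail_reason

    def is_failed(self):
        return self.fail_reason is not None

    def is_image_available(self):
        return (self.image_id is not None and
                self.image_id is not self.REMOTE_IMAGE)

    @classmethod
    def make_remote_image_result(cls):
        return cls(image_id=cls.REMOTE_IMAGE)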
def test_tag_parse(tmpdir, docker_tasker, floating_tags, unique_tags, primary_tags, expected):
    df = df_parser(str(tmpdir))
    df.content = DF_CONTENT_LABELS

    workflow = mock_workflow(tmpdir)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)
    workflow.build_result = BuildResult.make_remote_image_result()

    base_inspect = {
        INSPECT_CONFIG: {
            'Labels': {'parentrelease': '7.4.1'},
            'Env': {'parentrelease': '7.4.1'},
        }
    }
    setattr(workflow.builder, 'base_image_inspect', base_inspect)
    mock_additional_tags_file(str(tmpdir), ['get_tags', 'file_tags'])

    if unique_tags is not None and primary_tags is not None and floating_tags is not None:
        input_tags = {
            'unique': unique_tags,
            'primary': primary_tags,
            'floating': floating_tags,
        }
    else:
        input_tags = None
    runner = PostBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{'name': TagFromConfigPlugin.key,
          'args': {'tag_suffixes': input_tags}}]
    )
    if expected is not None:
        results = runner.run()
        plugin_result = results[TagFromConfigPlugin.key]

        # Plugin should return the tags we expect
        assert plugin_result == expected

        # Workflow should have the expected tags configured
        for tag in expected:
            assert any(tag == str(image) for image in workflow.tag_conf.images)

        # Workflow should not have any other tags configured
        assert len(workflow.tag_conf.images) == len(expected)
    else:
        with pytest.raises(PluginFailedException):
            runner.run()
def test_bad_inspect_data(tmpdir, docker_tasker, inspect, error):
    workflow = mock_workflow(tmpdir)
    if inspect is not None:
        workflow.built_image_inspect = {INSPECT_CONFIG: inspect}
    workflow.build_result = BuildResult(image_id=IMPORTED_IMAGE_ID)

    mock_additional_tags_file(str(tmpdir), ['spam', 'bacon'])

    runner = PostBuildPluginsRunner(docker_tasker, workflow,
                                    [{
                                        'name': TagFromConfigPlugin.key
                                    }])

    with pytest.raises(PluginFailedException) as exc:
        runner.run()

    assert error in str(exc)
def test_tags_enclosed(tmpdir, docker_tasker, name, organization, expected):
    df = df_parser(str(tmpdir))
    df.content = dedent("""\
        FROM fedora
        LABEL "name"="{}"
        LABEL "version"="1.7"
        LABEL "release"="99"
    """.format(name))

    workflow = mock_workflow(tmpdir)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)
    workflow.build_result = BuildResult.make_remote_image_result()

    if organization:
        reactor_config = ReactorConfig({
            'version': 1,
            'registries_organization': organization
        })
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {WORKSPACE_CONF_KEY: reactor_config}

    input_tags = {
        'unique': ['foo', 'bar'],
        'primary': ['{version}', '{version}-{release}'],
    }

    runner = PostBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{'name': TagFromConfigPlugin.key,
          'args': {'tag_suffixes': input_tags}}]
    )

    results = runner.run()
    plugin_result = results[TagFromConfigPlugin.key]

    expected_tags = ['{}:{}'.format(expected, tag) for tag in ['foo', 'bar', '1.7', '1.7-99']]
    # Plugin should return the tags we expect
    assert plugin_result == expected_tags

    # Workflow should have the expected tags configured
    for tag in expected_tags:
        assert any(tag == str(image) for image in workflow.tag_conf.images)

    # Workflow should not have any other tags configured
    assert len(workflow.tag_conf.images) == len(expected_tags)
def test_tag_from_config_plugin_generated(tmpdir, docker_tasker, tags, name,
                                          expected):
    workflow = mock_workflow(tmpdir)
    workflow.built_image_inspect = {INSPECT_CONFIG: {'Labels': {'Name': name}}}
    workflow.build_result = BuildResult(image_id=IMPORTED_IMAGE_ID)

    # When tags is None, simulate a missing additional-tags file by not creating it.
    if tags is not None:
        mock_additional_tags_file(str(tmpdir), tags)

    runner = PostBuildPluginsRunner(docker_tasker, workflow,
                                    [{
                                        'name': TagFromConfigPlugin.key
                                    }])

    results = runner.run()
    plugin_result = results[TagFromConfigPlugin.key]
    assert plugin_result == expected
Example #24
def test_remove_worker_metadata_no_worker_build(tmpdir, caplog, user_params):
    """Don't traceback with missing worker builds, without worker
    builds; without worker builds the plugin should just skip"""
    workflow = mock_workflow(tmpdir)
    annotations = None
    workflow.build_result = BuildResult(annotations=annotations, image_id="id1234")

    runner = ExitPluginsRunner(
        None,
        workflow,
        [{
            'name': PLUGIN_REMOVE_WORKER_METADATA_KEY,
            "args": {}
        }]
    )
    runner.run()

    assert "No build annotations found, skipping plugin" in caplog.text
    assert "Traceback" not in caplog.text
Example #25
def test_arrangementv4_repositories(tmpdir, group_manifests, restore,
                                    reactor_config_map):
    workflow = prepare(reactor_config_map=reactor_config_map)
    df_content = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    df = df_parser(str(tmpdir))
    df.content = df_content
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    runner = ExitPluginsRunner(None, workflow,
                               [{
                                   'name': StoreMetadataInOSv3Plugin.key,
                                   "args": {
                                       "url": "http://example.com/"
                                   }
                               }])

    worker_data = {
        'repositories': {
            'primary': ['worker:1'],
            'unique': ['worker:unique'],
        },
    }
    workflow.buildstep_result[OrchestrateBuildPlugin.key] = worker_data
    workflow.build_result = BuildResult.make_remote_image_result(
        annotations=worker_data)
    if group_manifests is not None:
        workflow.postbuild_results[PLUGIN_GROUP_MANIFESTS_KEY] = group_manifests
    output = runner.run()
    assert StoreMetadataInOSv3Plugin.key in output
    annotations = output[StoreMetadataInOSv3Plugin.key]["annotations"]
    repositories = json.loads(annotations['repositories'])
    if restore:
        assert repositories != worker_data['repositories']
    else:
        assert repositories == worker_data['repositories']
def test_arrangementv4_repositories(tmpdir, group_manifests, restore):
    workflow = prepare()
    df_content = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    df = df_parser(str(tmpdir))
    df.content = df_content
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)

    runner = ExitPluginsRunner(
        None,
        workflow,
        [{
            'name': StoreMetadataInOSv3Plugin.key,
            "args": {
                "url": "http://example.com/"
            }
        }]
    )

    worker_data = {
        'repositories': {
            'primary': ['worker:1'],
            'unique': ['worker:unique'],
        },
    }
    workflow.buildstep_result[OrchestrateBuildPlugin.key] = worker_data
    workflow.build_result = BuildResult.make_remote_image_result(annotations=worker_data)
    if group_manifests is not None:
        workflow.postbuild_results[PLUGIN_GROUP_MANIFESTS_KEY] = group_manifests
    output = runner.run()
    assert StoreMetadataInOSv3Plugin.key in output
    annotations = output[StoreMetadataInOSv3Plugin.key]["annotations"]
    repositories = json.loads(annotations['repositories'])
    if restore:
        assert repositories != worker_data['repositories']
    else:
        assert repositories == worker_data['repositories']
def test_tag_parse(tmpdir, docker_tasker, unique_tags, primary_tags, expected):
    df = df_parser(str(tmpdir))
    df.content = DF_CONTENT_LABELS

    workflow = mock_workflow(tmpdir)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)
    workflow.build_result = BuildResult.make_remote_image_result()

    flexmock(workflow,
             base_image_inspect={
                 INSPECT_CONFIG: {
                     'Labels': {
                         'parentrelease': '7.4.1'
                     },
                     'Env': {
                         'parentrelease': '7.4.1'
                     },
                 }
             })
    mock_additional_tags_file(str(tmpdir), ['get_tags', 'file_tags'])

    if unique_tags is not None and primary_tags is not None:
        input_tags = {'unique': unique_tags, 'primary': primary_tags}
    else:
        input_tags = None
    runner = PostBuildPluginsRunner(docker_tasker, workflow,
                                    [{
                                        'name': TagFromConfigPlugin.key,
                                        'args': {
                                            'tag_suffixes': input_tags
                                        }
                                    }])
    if expected is not None:
        results = runner.run()
        plugin_result = results[TagFromConfigPlugin.key]
        assert plugin_result == expected
    else:
        with pytest.raises(PluginFailedException):
            runner.run()
Example #28
def test_pulp_publish_delete(worker_builds_created, v1_image_ids, expected,
                             caplog):
    tasker, workflow = prepare(success=False, v1_image_ids=v1_image_ids)
    if not worker_builds_created:
        workflow.build_result = BuildResult(fail_reason="not built")

    plugin = PulpPublishPlugin(tasker, workflow, 'pulp_registry_name')
    msg = "removing ppc64le_v1_image_id from"

    (flexmock(dockpulp.Pulp).should_receive('crane').never())
    if expected:
        (flexmock(dockpulp.Pulp).should_receive('remove').with_args(
            unicode, unicode))
    else:
        (flexmock(dockpulp.Pulp).should_receive('remove').never())

    crane_images = plugin.run()

    assert crane_images == []
    if expected and worker_builds_created:
        assert msg in caplog.text()
    else:
        assert msg not in caplog.text()
def test_tag_parse(tmpdir, docker_tasker, unique_tags, primary_tags, expected):
    df = df_parser(str(tmpdir))
    df.content = DF_CONTENT_LABELS

    workflow = mock_workflow(tmpdir)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)
    workflow.build_result = BuildResult.make_remote_image_result()

    flexmock(workflow, base_image_inspect={
        INSPECT_CONFIG: {
            'Labels': {'parentrelease': '7.4.1'},
            'Env': {'parentrelease': '7.4.1'},
        }
    })
    mock_additional_tags_file(str(tmpdir), ['get_tags', 'file_tags'])

    if unique_tags is not None and primary_tags is not None:
        input_tags = {
            'unique': unique_tags,
            'primary': primary_tags
        }
    else:
        input_tags = None
    runner = PostBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{'name': TagFromConfigPlugin.key,
          'args': {'tag_suffixes': input_tags}}]
    )
    if expected is not None:
        results = runner.run()
        plugin_result = results[TagFromConfigPlugin.key]
        assert plugin_result == expected
    else:
        with pytest.raises(PluginFailedException):
            runner.run()
Example #30
class DockerBuildWorkflow(object):
    """
    This class defines a workflow for building images:

    1. pull image from registry
    2. tag it properly if needed
    3. obtain source
    4. build image
    5. tag it
    6. push it to registries
    """

    def __init__(self, source, image, prebuild_plugins=None, prepublish_plugins=None,
                 postbuild_plugins=None, exit_plugins=None, plugin_files=None,
                 openshift_build_selflink=None, client_version=None,
                 buildstep_plugins=None, **kwargs):
        """
        :param source: dict, where/how to get source code to put in image
        :param image: str, tag for built image ([registry/]image_name[:tag])
        :param prebuild_plugins: dict, arguments for pre-build plugins
        :param prepublish_plugins: dict, arguments for test-build plugins
        :param postbuild_plugins: dict, arguments for post-build plugins
        :param plugin_files: list of str, load plugins also from these files
        :param openshift_build_selflink: str, link to openshift build (if we're actually running
            on openshift) without the actual hostname/IP address
        :param client_version: str, osbs-client version used to render build json
        :param buildstep_plugins: dict, arguments for build-step plugins
        """
        self.source = get_source_instance_for(source, tmpdir=tempfile.mkdtemp())
        self.image = image

        self.prebuild_plugins_conf = prebuild_plugins
        self.buildstep_plugins_conf = buildstep_plugins
        self.prepublish_plugins_conf = prepublish_plugins
        self.postbuild_plugins_conf = postbuild_plugins
        self.exit_plugins_conf = exit_plugins
        self.prebuild_results = {}
        self.buildstep_result = {}
        self.postbuild_results = {}
        self.prepub_results = {}
        self.exit_results = {}
        self.build_result = BuildResult(fail_reason="not built")
        self.plugin_workspace = {}
        self.plugins_timestamps = {}
        self.plugins_durations = {}
        self.plugins_errors = {}
        self.autorebuild_canceled = False
        self.build_canceled = False
        self.plugin_failed = False
        self.plugin_files = plugin_files

        self.kwargs = kwargs

        self.builder = None
        self.built_image_inspect = None
        self.layer_sizes = []
        self._base_image_inspect = None

        self.pulled_base_images = set()

        # When an image is exported into tarball, it can then be processed by various plugins.
        #  Each plugin that transforms the image should save it as a new file and append it to
        #  the end of exported_image_sequence. Other plugins should then operate with last
        #  member of this structure. Example:
        #  [{'path': '/tmp/foo.tar', 'size': 12345678, 'md5sum': '<md5>', 'sha256sum': '<sha256>'}]
        #  You can use util.get_exported_image_metadata to create a dict to append to this list.
        self.exported_image_sequence = []

        self.tag_conf = TagConf()
        self.push_conf = PushConf()

        # mapping of downloaded files; DON'T PUT ANYTHING BIG HERE!
        # "path/to/file" -> "content"
        self.files = {}

        self.openshift_build_selflink = openshift_build_selflink

        # List of RPMs that go into the final result, as per rpm_util.parse_rpm_output
        self.image_components = None

        if client_version:
            logger.debug("build json was built by osbs-client %s", client_version)

        if kwargs:
            logger.warning("unprocessed keyword arguments: %s", kwargs)

    @property
    def build_process_failed(self):
        """
        Has any aspect of the build process failed?
        """
        return self.build_result.is_failed() or self.plugin_failed

    # inspect base image lazily just before it's needed - pre plugins may change the base image
    @property
    def base_image_inspect(self):
        if self._base_image_inspect is None:
            try:
                self._base_image_inspect = self.builder.tasker.inspect_image(
                    self.builder.base_image)
            except docker.errors.NotFound:
                # If the base image cannot be found throw KeyError - as this property should behave
                # like a dict
                raise KeyError("Unprocessed base image Dockerfile cannot be inspected")
        return self._base_image_inspect

    def throw_canceled_build_exception(self, *args, **kwargs):
        self.build_canceled = True
        raise BuildCanceledException("Build was canceled")

    def build_docker_image(self):
        """
        build docker image

        :return: BuildResult
        """
        self.builder = InsideBuilder(self.source, self.image)
        try:
            signal.signal(signal.SIGTERM, self.throw_canceled_build_exception)
            # time to run pre-build plugins, so they can access cloned repo
            logger.info("running pre-build plugins")
            prebuild_runner = PreBuildPluginsRunner(self.builder.tasker, self,
                                                    self.prebuild_plugins_conf,
                                                    plugin_files=self.plugin_files)
            try:
                prebuild_runner.run()
            except PluginFailedException as ex:
                logger.error("one or more prebuild plugins failed: %s", ex)
                raise
            except AutoRebuildCanceledException as ex:
                logger.info(str(ex))
                self.autorebuild_canceled = True
                raise

            logger.info("running buildstep plugins")
            buildstep_runner = BuildStepPluginsRunner(self.builder.tasker, self,
                                                      self.buildstep_plugins_conf,
                                                      plugin_files=self.plugin_files)
            try:
                self.build_result = buildstep_runner.run()

                if self.build_result.is_failed():
                    raise PluginFailedException(self.build_result.fail_reason)
            except PluginFailedException as ex:
                self.builder.is_built = False
                logger.error('buildstep plugin failed: %s', ex)
                raise

            self.builder.is_built = True
            if self.build_result.is_image_available():
                self.builder.image_id = self.build_result.image_id

            # run prepublish plugins
            prepublish_runner = PrePublishPluginsRunner(self.builder.tasker, self,
                                                        self.prepublish_plugins_conf,
                                                        plugin_files=self.plugin_files)
            try:
                prepublish_runner.run()
            except PluginFailedException as ex:
                logger.error("one or more prepublish plugins failed: %s", ex)
                raise

            if self.build_result.is_image_available():
                self.built_image_inspect = self.builder.inspect_built_image()
                history = self.builder.tasker.d.history(self.builder.image_id)
                diff_ids = self.built_image_inspect[INSPECT_ROOTFS][INSPECT_ROOTFS_LAYERS]

                # diff_ids is ordered oldest first
                # history is ordered newest first
                # We want layer_sizes to be ordered oldest first
                self.layer_sizes = [{"diff_id": diff_id, "size": layer['Size']}
                                    for (diff_id, layer) in zip(diff_ids, reversed(history))]

            postbuild_runner = PostBuildPluginsRunner(self.builder.tasker, self,
                                                      self.postbuild_plugins_conf,
                                                      plugin_files=self.plugin_files)
            try:
                postbuild_runner.run()
            except PluginFailedException as ex:
                logger.error("one or more postbuild plugins failed: %s", ex)
                raise

            return self.build_result
        except Exception as ex:
            logger.debug("caught exception (%r) so running exit plugins", ex)
            raise
        finally:
            # We need to make sure all exit plugins are executed
            signal.signal(signal.SIGTERM, lambda *args: None)
            exit_runner = ExitPluginsRunner(self.builder.tasker, self,
                                            self.exit_plugins_conf,
                                            plugin_files=self.plugin_files)
            try:
                exit_runner.run(keep_going=True)
            except PluginFailedException as ex:
                logger.error("one or more exit plugins failed: %s", ex)
                raise
            finally:
                self.source.remove_tmpdir()

            signal.signal(signal.SIGTERM, signal.SIG_DFL)
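A self-contained sketch of the SIGTERM-to-exception cancellation idiom used in build_docker_image above (the build body is a placeholder):

import signal


class BuildCanceled(Exception):
    pass


def raise_canceled(signum, frame):
    raise BuildCanceled("build was canceled")


def build():
    # note: signal handlers can only be installed from the main thread
    signal.signal(signal.SIGTERM, raise_canceled)
    try:
        pass  # ... run build and plugin steps here ...
    finally:
        # restore the default handler so cleanup cannot be interrupted
        # the same way
        signal.signal(signal.SIGTERM, signal.SIG_DFL)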
Example #31
    def __init__(self, source, image, prebuild_plugins=None, prepublish_plugins=None,
                 postbuild_plugins=None, exit_plugins=None, plugin_files=None,
                 openshift_build_selflink=None, client_version=None,
                 buildstep_plugins=None, **kwargs):
        """
        :param source: dict, where/how to get source code to put in image
        :param image: str, tag for built image ([registry/]image_name[:tag])
        :param prebuild_plugins: dict, arguments for pre-build plugins
        :param prepublish_plugins: dict, arguments for test-build plugins
        :param postbuild_plugins: dict, arguments for post-build plugins
        :param plugin_files: list of str, load plugins also from these files
        :param openshift_build_selflink: str, link to openshift build (if we're actually running
            on openshift) without the actual hostname/IP address
        :param client_version: str, osbs-client version used to render build json
        :param buildstep_plugins: dict, arguments for build-step plugins
        """
        self.source = get_source_instance_for(source, tmpdir=tempfile.mkdtemp())
        self.image = image

        self.prebuild_plugins_conf = prebuild_plugins
        self.buildstep_plugins_conf = buildstep_plugins
        self.prepublish_plugins_conf = prepublish_plugins
        self.postbuild_plugins_conf = postbuild_plugins
        self.exit_plugins_conf = exit_plugins
        self.prebuild_results = {}
        self.buildstep_result = {}
        self.postbuild_results = {}
        self.prepub_results = {}
        self.exit_results = {}
        self.build_result = BuildResult(fail_reason="not built")
        self.plugin_workspace = {}
        self.plugins_timestamps = {}
        self.plugins_durations = {}
        self.plugins_errors = {}
        self.autorebuild_canceled = False
        self.build_canceled = False
        self.plugin_failed = False
        self.plugin_files = plugin_files
        self.fs_watcher = FSWatcher()

        self.kwargs = kwargs

        self.builder = None
        self.built_image_inspect = None
        self.layer_sizes = []
        self._base_image_inspect = None
        self.default_image_build_method = CONTAINER_DEFAULT_BUILD_METHOD

        self.pulled_base_images = set()

        # When an image is exported into tarball, it can then be processed by various plugins.
        #  Each plugin that transforms the image should save it as a new file and append it to
        #  the end of exported_image_sequence. Other plugins should then operate with last
        #  member of this structure. Example:
        #  [{'path': '/tmp/foo.tar', 'size': 12345678, 'md5sum': '<md5>', 'sha256sum': '<sha256>'}]
        #  You can use util.get_exported_image_metadata to create a dict to append to this list.
        self.exported_image_sequence = []

        self.tag_conf = TagConf()
        self.push_conf = PushConf()

        # mapping of downloaded files; DON'T PUT ANYTHING BIG HERE!
        # "path/to/file" -> "content"
        self.files = {}

        self.openshift_build_selflink = openshift_build_selflink

        # List of RPMs that go into the final result, as per rpm_util.parse_rpm_output
        self.image_components = None

        if client_version:
            logger.debug("build json was built by osbs-client %s", client_version)

        if kwargs:
            logger.warning("unprocessed keyword arguments: %s", kwargs)
Example #32
class DockerBuildWorkflow(object):
    """
    This class defines a workflow for building images:

    1. pull image from registry
    2. tag it properly if needed
    3. obtain source
    4. build image
    5. tag it
    6. push it to registries
    """

    def __init__(self, source, image, prebuild_plugins=None, prepublish_plugins=None,
                 postbuild_plugins=None, exit_plugins=None, plugin_files=None,
                 openshift_build_selflink=None, client_version=None,
                 buildstep_plugins=None, **kwargs):
        """
        :param source: dict, where/how to get source code to put in image
        :param image: str, tag for built image ([registry/]image_name[:tag])
        :param prebuild_plugins: dict, arguments for pre-build plugins
        :param prepublish_plugins: dict, arguments for test-build plugins
        :param postbuild_plugins: dict, arguments for post-build plugins
        :param plugin_files: list of str, load plugins also from these files
        :param openshift_build_selflink: str, link to openshift build (if we're actually running
            on openshift) without the actual hostname/IP address
        :param client_version: str, osbs-client version used to render build json
        :param buildstep_plugins: dict, arguments for build-step plugins
        """
        self.source = get_source_instance_for(source, tmpdir=tempfile.mkdtemp())
        self.image = image

        self.prebuild_plugins_conf = prebuild_plugins
        self.buildstep_plugins_conf = buildstep_plugins
        self.prepublish_plugins_conf = prepublish_plugins
        self.postbuild_plugins_conf = postbuild_plugins
        self.exit_plugins_conf = exit_plugins
        self.prebuild_results = {}
        self.buildstep_result = {}
        self.postbuild_results = {}
        self.prepub_results = {}
        self.exit_results = {}
        self.build_result = BuildResult(fail_reason="not built")
        self.plugin_workspace = {}
        self.plugins_timestamps = {}
        self.plugins_durations = {}
        self.plugins_errors = {}
        self.autorebuild_canceled = False
        self.build_canceled = False
        self.plugin_failed = False
        self.plugin_files = plugin_files
        self.fs_watcher = FSWatcher()

        self.kwargs = kwargs

        self.builder = None
        self.built_image_inspect = None
        self.layer_sizes = []
        self._base_image_inspect = None
        self.default_image_build_method = CONTAINER_DEFAULT_BUILD_METHOD

        self.pulled_base_images = set()

        # When an image is exported into tarball, it can then be processed by various plugins.
        #  Each plugin that transforms the image should save it as a new file and append it to
        #  the end of exported_image_sequence. Other plugins should then operate on the last
        #  member of this structure. Example:
        #  [{'path': '/tmp/foo.tar', 'size': 12345678, 'md5sum': '<md5>', 'sha256sum': '<sha256>'}]
        #  You can use util.get_exported_image_metadata to create a dict to append to this list.
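        #  A sketch (with a hypothetical path) of a plugin appending its output:
        #      metadata = util.get_exported_image_metadata('/tmp/bar.tar',
        #                                                  IMAGE_TYPE_DOCKER_ARCHIVE)
        #      workflow.exported_image_sequence.append(metadata)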
        self.exported_image_sequence = []

        self.tag_conf = TagConf()
        self.push_conf = PushConf()

        # mapping of downloaded files; DON'T PUT ANYTHING BIG HERE!
        # "path/to/file" -> "content"
        self.files = {}

        self.openshift_build_selflink = openshift_build_selflink

        # List of RPMs that go into the final result, as per rpm_util.parse_rpm_output
        self.image_components = None

        if client_version:
            logger.debug("build json was built by osbs-client %s", client_version)

        if kwargs:
            logger.warning("unprocessed keyword arguments: %s", kwargs)

    @property
    def build_process_failed(self):
        """
        Has any aspect of the build process failed?
        """
        return self.build_result.is_failed() or self.plugin_failed

    # inspect base image lazily just before it's needed - pre plugins may change the base image
    @property
    def base_image_inspect(self):
        if self._base_image_inspect is None:
            try:
                self._base_image_inspect = self.builder.tasker.inspect_image(
                    self.builder.base_image)
            except docker.errors.NotFound:
                # If the base image cannot be found throw KeyError - as this property should behave
                # like a dict
                raise KeyError("Unprocessed base image Dockerfile cannot be inspected")
        return self._base_image_inspect
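
    # A sketch of how a caller might lean on the dict-like KeyError behaviour
    # above (hypothetical plugin code):
    #     try:
    #         base_env = workflow.base_image_inspect['Config']['Env']
    #     except KeyError:
    #         base_env = []  # base image has not been inspected yet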

    def throw_canceled_build_exception(self, *args, **kwargs):
        self.build_canceled = True
        raise BuildCanceledException("Build was canceled")

    def build_docker_image(self):
        """
        build docker image

        :return: BuildResult
        """
        self.builder = InsideBuilder(self.source, self.image)
        try:
            self.fs_watcher.start()
            signal.signal(signal.SIGTERM, self.throw_canceled_build_exception)
            # time to run pre-build plugins, so they can access cloned repo
            logger.info("running pre-build plugins")
            prebuild_runner = PreBuildPluginsRunner(self.builder.tasker, self,
                                                    self.prebuild_plugins_conf,
                                                    plugin_files=self.plugin_files)
            try:
                prebuild_runner.run()
            except PluginFailedException as ex:
                logger.error("one or more prebuild plugins failed: %s", ex)
                raise
            except AutoRebuildCanceledException as ex:
                logger.info(str(ex))
                self.autorebuild_canceled = True
                raise

            logger.info("running buildstep plugins")
            buildstep_runner = BuildStepPluginsRunner(self.builder.tasker, self,
                                                      self.buildstep_plugins_conf,
                                                      plugin_files=self.plugin_files)
            try:
                self.build_result = buildstep_runner.run()

                if self.build_result.is_failed():
                    raise PluginFailedException(self.build_result.fail_reason)
            except PluginFailedException as ex:
                self.builder.is_built = False
                logger.error('buildstep plugin failed: %s', ex)
                raise

            self.builder.is_built = True
            if self.build_result.is_image_available():
                self.builder.image_id = self.build_result.image_id

            # run prepublish plugins
            prepublish_runner = PrePublishPluginsRunner(self.builder.tasker, self,
                                                        self.prepublish_plugins_conf,
                                                        plugin_files=self.plugin_files)
            try:
                prepublish_runner.run()
            except PluginFailedException as ex:
                logger.error("one or more prepublish plugins failed: %s", ex)
                raise

            if self.build_result.is_image_available():
                self.built_image_inspect = self.builder.inspect_built_image()
                history = self.builder.tasker.d.history(self.builder.image_id)
                diff_ids = self.built_image_inspect[INSPECT_ROOTFS][INSPECT_ROOTFS_LAYERS]

                # diff_ids is ordered oldest first
                # history is ordered newest first
                # We want layer_sizes to be ordered oldest first
                self.layer_sizes = [{"diff_id": diff_id, "size": layer['Size']}
                                    for (diff_id, layer) in zip(diff_ids, reversed(history))]
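                # Worked example with hypothetical values:
                #   diff_ids = ['sha256:aaa', 'sha256:bbb']   (oldest first)
                #   history  = [{'Size': 20}, {'Size': 10}]   (newest first)
                # reversed(history) yields the 10-byte layer first, so zip
                # pairs 'sha256:aaa' with size 10 and 'sha256:bbb' with
                # size 20, keeping layer_sizes ordered oldest first.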

            postbuild_runner = PostBuildPluginsRunner(self.builder.tasker, self,
                                                      self.postbuild_plugins_conf,
                                                      plugin_files=self.plugin_files)
            try:
                postbuild_runner.run()
            except PluginFailedException as ex:
                logger.error("one or more postbuild plugins failed: %s", ex)
                raise

            return self.build_result
        except Exception as ex:
            logger.debug("caught exception (%r) so running exit plugins", ex)
            raise
        finally:
            # We need to make sure all exit plugins are executed
            signal.signal(signal.SIGTERM, lambda *args: None)
            exit_runner = ExitPluginsRunner(self.builder.tasker, self,
                                            self.exit_plugins_conf,
                                            plugin_files=self.plugin_files)
            try:
                exit_runner.run(keep_going=True)
            except PluginFailedException as ex:
                logger.error("one or more exit plugins failed: %s", ex)
                raise
            finally:
                self.source.remove_tmpdir()
                self.fs_watcher.finish()

            signal.signal(signal.SIGTERM, signal.SIG_DFL)
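
A minimal sketch of driving this workflow end to end (the source dict and the
image name below are hypothetical, and all plugin configuration is left at its
defaults):

    source = {'provider': 'git', 'uri': 'https://example.com/app.git'}
    workflow = DockerBuildWorkflow(source, 'example/app:latest')
    build_result = workflow.build_docker_image()
    if build_result.is_failed():
        print('build failed: {}'.format(build_result.fail_reason))
    else:
        print('built image {}'.format(build_result.image_id))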
Example #33
def prepare(
    tmpdir,
    insecure_registry=None,
    namespace=None,
    primary_images_tag_conf=DEFAULT_TAGS_AMOUNT,
    primary_images_annotations=DEFAULT_TAGS_AMOUNT,
    build_process_failed=False,
    organization=None,
    reactor_config_map=False,
):
    """
    Boiler-plate test set-up
    """
    if MOCK:
        mock_docker()
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(SOURCE, "test-image")
    setattr(workflow, 'builder', X())
    flexmock(workflow, build_process_failed=build_process_failed)
    setattr(workflow.builder, 'image_id', 'asd123')
    setattr(workflow.builder, 'source', X())
    setattr(workflow.builder.source, 'dockerfile_path', None)
    setattr(workflow.builder.source, 'path', None)

    df = tmpdir.join('Dockerfile')
    df.write('FROM base\nLABEL name={}'.format(TEST_NAME_LABEL))
    setattr(workflow.builder, 'df_path', str(df))

    annotations = None
    if primary_images_annotations:
        primary_images = [
            '{}:annotation_{}'.format(TEST_REPO_WITH_REGISTRY, x)
            for x in range(primary_images_annotations)
        ]
        primary_images.append(
            '{}:version-release'.format(TEST_REPO_WITH_REGISTRY))
        annotations = {'repositories': {'primary': primary_images}}
    build_result = BuildResult(annotations=annotations, image_id='foo')
    setattr(workflow, 'build_result', build_result)

    if primary_images_tag_conf:
        primary_images = [
            '{}:tag_conf_{}'.format(TEST_REPO, x)
            for x in range(primary_images_tag_conf)
        ]
        primary_images.insert(0, '{}:version-release'.format(TEST_REPO))
        workflow.tag_conf.add_primary_images(primary_images)

    fake_conf = osbs.conf.Configuration(conf_file=None, openshift_url='/')

    expectation = flexmock(
        osbs.conf).should_receive('Configuration').and_return(fake_conf)
    if namespace:
        expectation.with_args(conf_file=None,
                              namespace=namespace,
                              verify_ssl=not insecure_registry,
                              openshift_url="/",
                              use_auth=False,
                              build_json_dir="/var/json_dir")

    plugin_args = {'imagestream': TEST_IMAGESTREAM}

    if reactor_config_map:
        openshift_map = {
            'url': '/',
            'auth': {
                'enable': False
            },
            'insecure': insecure_registry,
            'build_json_dir': '/var/json_dir',
        }
        source_registry_map = {
            'url': TEST_REGISTRY,
            'insecure': insecure_registry
        }
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({
                'version': 1,
                'openshift': openshift_map,
                'source_registry': source_registry_map,
                'registries_organization': organization,
            })
    else:
        plugin_args.update({
            'docker_image_repo': TEST_REPO_WITH_REGISTRY,
            'url': '/',
            'build_json_dir': "/var/json_dir",
            'verify_ssl': not insecure_registry,
            'use_auth': False,
            'insecure_registry': insecure_registry,
        })

    runner = PostBuildPluginsRunner(tasker, workflow,
                                    [{
                                        'name': ImportImagePlugin.key,
                                        'args': plugin_args
                                    }])

    def mocked_import_image_tags(**kwargs):
        return

    if not hasattr(OSBS, 'import_image_tags'):
        setattr(OSBS, 'import_image_tags', mocked_import_image_tags)

    return runner
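
A hypothetical test built on this helper only needs to run the returned
plugin runner, e.g.:

    def test_import_image_tags(tmpdir):
        runner = prepare(tmpdir, namespace='test')
        runner.run()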
Example #34
class WF(object):
    build_result = BuildResult(fail_reason="not built")
    autorebuild_canceled = False
    prebuild_results = {CheckAndSetRebuildPlugin.key: True}
    image = ImageName.parse('repo/name')
Example #35
from tests.docker_mock import mock_docker
from tests.util import requires_internet, is_string_type
import inspect
import json
import signal
import threading

import pytest

from time import sleep

from atomic_reactor.build import BuildResult
from atomic_reactor.inner import BuildResults, BuildResultsEncoder, BuildResultsJSONDecoder
from atomic_reactor.inner import DockerBuildWorkflow

BUILD_RESULTS_ATTRS = [
    'build_logs', 'built_img_inspect', 'built_img_info', 'base_img_info',
    'base_plugins_output', 'built_img_plugins_output'
]
DUMMY_BUILD_RESULT = BuildResult(image_id="image_id")
DUMMY_FAILED_BUILD_RESULT = BuildResult(fail_reason='it happens')
DUMMY_REMOTE_BUILD_RESULT = BuildResult.make_remote_image_result()


def test_build_results_encoder():
    results = BuildResults()
    expected_data = {}
    for attr in BUILD_RESULTS_ATTRS:
        setattr(results, attr, attr)
        expected_data[attr] = attr

    data = json.loads(json.dumps(results, cls=BuildResultsEncoder))
    assert data == expected_data


def test_build_result():
    with pytest.raises(AssertionError):
        BuildResult(fail_reason='it happens', image_id='spam')

    with pytest.raises(AssertionError):
        BuildResult(fail_reason='', image_id='spam')

    assert BuildResult(fail_reason='it happens').is_failed()
    assert not BuildResult(image_id='spam').is_failed()

    assert BuildResult(image_id='spam', logs=list('logs')).logs == list('logs')

    assert BuildResult(fail_reason='it happens').fail_reason == 'it happens'
    assert BuildResult(image_id='spam').image_id == 'spam'

    assert BuildResult(image_id='spam', annotations={'ham': 'mah'}).annotations == {'ham': 'mah'}

    assert BuildResult(image_id='spam', labels={'ham': 'mah'}).labels == {'ham': 'mah'}

    assert BuildResult(image_id='spam').is_image_available()
    assert not BuildResult(fail_reason='it happens').is_image_available()
    assert not BuildResult.make_remote_image_result().is_image_available()

    assert not BuildResult.make_remote_image_result().is_failed()
    def run(self):
        """
        Build image inside current environment using imagebuilder;
        It's expected this may run within (privileged) docker container.

        Returns:
            BuildResult
        """
        builder = self.workflow.builder

        image = builder.image.to_str()
        # TODO: directly invoke go imagebuilder library in shared object via python module
        kwargs = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if not PY2:
            kwargs['encoding'] = 'utf-8'
        ib_process = subprocess.Popen(
            ['imagebuilder', '-t', image, builder.df_dir], **kwargs)

        self.log.debug(
            'imagebuilder build has begun; waiting for it to finish')
        (output, last_error) = ([], None)
        while True:
            poll = ib_process.poll()
            out = sixdecode(ib_process.stdout.readline())
            if out:
                self.log.info('%s', out.strip())
                output.append(out)
            err = sixdecode(ib_process.stderr.readline())
            if err:
                self.log.error('%s', err.strip())
                output.append(err)  # include stderr with stdout
                last_error = err  # while noting the final line
            if out == '' and err == '':
                if poll is not None:
                    break
                time.sleep(0.1)  # don't busy-wait when there's no output

        if ib_process.returncode != 0:
            # imagebuilder uses stderr for normal output too; so in the case of an apparent
            # failure, single out the last line to include in the failure summary.
            err = last_error or "<imagebuilder had bad exit code but no error output>"
            return BuildResult(
                logs=output,
                fail_reason="image build failed (rc={}): {}".format(
                    ib_process.returncode, err),
            )

        image_id = builder.get_built_image_info()['Id']
        if ':' not in image_id:
            # Older versions of the daemon do not include the prefix
            image_id = 'sha256:{}'.format(image_id)

        # since we skip squashing, export the image here for local operations,
        # as the squash plugin would have done
        self.log.info("fetching image %s from docker", image)
        output_path = os.path.join(self.workflow.source.workdir,
                                   EXPORTED_SQUASHED_IMAGE_NAME)
        with open(output_path, "wb") as image_file:
            image_file.write(self.tasker.d.get_image(image).data)
        img_metadata = get_exported_image_metadata(output_path,
                                                   IMAGE_TYPE_DOCKER_ARCHIVE)
        self.workflow.exported_image_sequence.append(img_metadata)

        return BuildResult(logs=output,
                           image_id=image_id,
                           skip_layer_squash=True)
Example #39
    def __init__(self, source, image, prebuild_plugins=None, prepublish_plugins=None,
                 postbuild_plugins=None, exit_plugins=None, plugin_files=None,
                 openshift_build_selflink=None, client_version=None,
                 buildstep_plugins=None, **kwargs):
        """
        :param source: dict, where/how to get source code to put in image
        :param image: str, tag for built image ([registry/]image_name[:tag])
        :param prebuild_plugins: dict, arguments for pre-build plugins
        :param prepublish_plugins: dict, arguments for pre-publish plugins
        :param postbuild_plugins: dict, arguments for post-build plugins
        :param exit_plugins: dict, arguments for exit plugins
        :param plugin_files: list of str, load plugins also from these files
        :param openshift_build_selflink: str, link to openshift build (if we're actually running
            on openshift) without the actual hostname/IP address
        :param client_version: str, osbs-client version used to render build json
        :param buildstep_plugins: dict, arguments for build-step plugins
        """
        self.source = get_source_instance_for(source, tmpdir=tempfile.mkdtemp())
        self.image = image

        self.prebuild_plugins_conf = prebuild_plugins
        self.buildstep_plugins_conf = buildstep_plugins
        self.prepublish_plugins_conf = prepublish_plugins
        self.postbuild_plugins_conf = postbuild_plugins
        self.exit_plugins_conf = exit_plugins
        self.prebuild_results = {}
        self.buildstep_result = {}
        self.postbuild_results = {}
        self.prepub_results = {}
        self.exit_results = {}
        self.build_result = BuildResult(fail_reason="not built")
        self.plugin_workspace = {}
        self.plugins_timestamps = {}
        self.plugins_durations = {}
        self.plugins_errors = {}
        self.autorebuild_canceled = False
        self.build_canceled = False
        self.plugin_failed = False
        self.plugin_files = plugin_files

        self.kwargs = kwargs

        self.builder = None
        self.built_image_inspect = None
        self.layer_sizes = []
        self._base_image_inspect = None

        self.pulled_base_images = set()

        # When an image is exported into tarball, it can then be processed by various plugins.
        #  Each plugin that transforms the image should save it as a new file and append it to
        #  the end of exported_image_sequence. Other plugins should then operate on the last
        #  member of this structure. Example:
        #  [{'path': '/tmp/foo.tar', 'size': 12345678, 'md5sum': '<md5>', 'sha256sum': '<sha256>'}]
        #  You can use util.get_exported_image_metadata to create a dict to append to this list.
        self.exported_image_sequence = []

        self.tag_conf = TagConf()
        self.push_conf = PushConf()

        # mapping of downloaded files; DON'T PUT ANYTHING BIG HERE!
        # "path/to/file" -> "content"
        self.files = {}

        self.openshift_build_selflink = openshift_build_selflink

        # List of RPMs that go into the final result, as per rpm_util.parse_rpm_output
        self.image_components = None

        if client_version:
            logger.debug("build json was built by osbs-client %s", client_version)

        if kwargs:
            logger.warning("unprocessed keyword arguments: %s", kwargs)