Example 1
    def test_koji_wrapper_event_constraint(self):
        brew.KojiWrapper.clear_global_cache()

        lock_on_event = 37152928
        test_tag = 'rhaos-4.5-rhel-8-candidate'

        # Get a brew-event locked wrapper
        k = brew.KojiWrapper(
            ['https://brewhub.engineering.redhat.com/brewhub'],
            brew_event=lock_on_event)

        try:
            # If events are locked and user calls a non-constrainable koji API, we should raise an exception
            k.getLastEvent()
            self.fail(
                'An exception should have been raised when invoking a non-constrainable API call'
            )
        except IOError:
            pass

        # However, if you tell the wrapper you are aware of the lock, you may perform the call
        k.getLastEvent(brew.KojiWrapperOpts(brew_event_aware=True))

        results = k.listTagged(
            test_tag,
            brew.KojiWrapperOpts(caching=True, logger=self.logger),
            package='openshift'
        )  # This should be transparently locked in brew time by the wrapper
        self.assertEqual(
            results[0]['task_id'], 34774198
        )  # Since we are locked to a brew event, this task_id is fixed in time
        call_meta: brew.KojiWrapperMetaReturn = k.listTagged(
            test_tag,
            brew.KojiWrapperOpts(caching=True, return_metadata=True),
            package='openshift'
        )  # Test to ensure caching is working with a brew event constraint.
        self.assertTrue(call_meta.cache_hit)
        self.assertEqual(call_meta.result[0]['task_id'], 34774198)

        # Now perform the same query, with caching on, without constraining the brew event.
        # The caches for a constrained query and an unconstrained query use different
        # namespaces, and thus we expect a different result.
        unlocked_k = brew.KojiWrapper(
            ['https://brewhub.engineering.redhat.com/brewhub'])
        unlocked_results = unlocked_k.listTagged(
            test_tag, brew.KojiWrapperOpts(caching=True), package='openshift')
        self.assertNotEqual(
            unlocked_results[0]['task_id'], 34774198
        )  # This was an unlocked query and we know the latest task has moved on
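A minimal sketch of the non-obvious behavior exercised above: when a KojiWrapper is locked to a brew event, non-constrainable calls raise IOError unless the caller explicitly opts in with brew_event_aware. It assumes the same brew module import as the tests; the hub URL and event number are placeholders, not values from the test.

locked = brew.KojiWrapper(
    ['https://brewhub.example.com/brewhub'],  # placeholder hub URL
    brew_event=12345678)  # placeholder brew event
try:
    locked.getLastEvent()  # not constrainable to an event; the wrapper refuses
except IOError:
    pass
# Explicitly acknowledging the event lock permits the call
latest = locked.getLastEvent(brew.KojiWrapperOpts(brew_event_aware=True))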
Example 2
 def run_multicall():
     m = k.multicall()
     call_1 = m.getLastEvent(
         brew.KojiWrapperOpts(caching=True, return_metadata=True))
     call_2 = m.getTag(
         'rhaos-4.7-rhel-8-candidate'
     )  # Note that if caching/metadata is True for one call in a multicall, it is True for all
     m.call_all()
     call_1_meta: brew.KojiWrapperMetaReturn = call_1.result
     call_2_meta: brew.KojiWrapperMetaReturn = call_2.result
     return call_1_meta, call_2_meta
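A minimal sketch of unwrapping the KojiWrapperMetaReturn objects produced by the multicall above, assuming `k` is an existing brew.KojiWrapper instance as in the surrounding examples.

last_event_meta, tag_meta = run_multicall()
if not last_event_meta.cache_hit:
    print('results came from the hub, not the cache')
print(last_event_meta.result)   # raw getLastEvent payload
print(tag_meta.result['id'])    # raw getTag payload (numeric tag id)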
Example 3
    def get_package_build_objects(self) -> Dict[str, Dict]:
        """
        :return: Returns a Dict containing records for package builds corresponding to
                 RPMs used by this RHCOS build.
                 Maps package_name -> brew build dict for package.
        """

        aggregate: Dict[str, Dict] = dict()
        with self.runtime.pooled_koji_client_session() as koji_api:
            for nvra in self.get_rpm_nvras():
                rpm_def = koji_api.getRPM(nvra, strict=True)
                package_build = koji_api.getBuild(rpm_def['build_id'], brew.KojiWrapperOpts(caching=True), strict=True)
                package_name = package_build['package_name']
                aggregate[package_name] = package_build

        return aggregate
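A minimal, hypothetical sketch of consuming the mapping returned above; `rhcos_build` stands in for an instance of the class that defines get_package_build_objects().

package_builds = rhcos_build.get_package_build_objects()
for package_name, build in sorted(package_builds.items()):
    print(f"{package_name}: {build['nvr']} (build_id={build['build_id']})")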
Example 4
    def test_koji_wrapper_caching(self):
        brew.KojiWrapper.clear_global_cache()

        # Get a non brew-event locked wrapper
        k = brew.KojiWrapper(
            ['https://brewhub.engineering.redhat.com/brewhub'])
        call_meta: brew.KojiWrapperMetaReturn = k.getLastEvent(
            brew.KojiWrapperOpts(caching=True, return_metadata=True))
        self.assertFalse(
            call_meta.cache_hit)  # The first query should be a miss
        last_event = call_meta.result
        # The value is cached, so a non-meta return should certainly be the same when we ask for it again
        self.assertEqual(k.getLastEvent(brew.KojiWrapperOpts(caching=True)),
                         last_event)

        # Now ask for metadata to ensure it was cached
        call_meta: brew.KojiWrapperMetaReturn = k.getLastEvent(
            brew.KojiWrapperOpts(caching=True, return_metadata=True))
        self.assertTrue(call_meta.cache_hit)
        self.assertEqual(call_meta.result, last_event)

        # Now make the same request without caching and ensure cache_hit is not True
        call_meta: brew.KojiWrapperMetaReturn = k.getLastEvent(
            brew.KojiWrapperOpts(return_metadata=True))
        self.assertFalse(call_meta.cache_hit)

        # Repeat the above with a multicall
        def run_multicall():
            m = k.multicall()
            call_1 = m.getLastEvent(
                brew.KojiWrapperOpts(caching=True, return_metadata=True))
            call_2 = m.getTag(
                'rhaos-4.7-rhel-8-candidate'
            )  # Note that if caching/metadata is True for one call in a multicall, it is True for all
            m.call_all()
            call_1_meta: brew.KojiWrapperMetaReturn = call_1.result
            call_2_meta: brew.KojiWrapperMetaReturn = call_2.result
            return call_1_meta, call_2_meta

        c1_meta, c2_meta = run_multicall()
        self.assertFalse(
            c1_meta.cache_hit
        )  # Though getLastEvent was cached above as a single call, it has to be cached separately as a multicall
        self.assertFalse(
            c2_meta.cache_hit)  # cache_hit should be identical for every call
        self.assertEqual(c2_meta.result['id'],
                         70115)  # The numeric id for the tag should not change

        # Now try it again and we should hit the cache
        _c1_meta, _c2_meta = run_multicall()
        self.assertTrue(_c1_meta.cache_hit)
        self.assertEqual(_c1_meta.result, c1_meta.result)
        self.assertEqual(_c2_meta.result, c2_meta.result)
        self.assertEqual(_c2_meta.result['id'],
                         70115)  # The numeric id for the tag should not change

        cache_size = brew.KojiWrapper.get_cache_size()
        brew.KojiWrapper.clear_global_cache()  # Test clearing cache
        self.assertGreater(cache_size, brew.KojiWrapper.get_cache_size())

        # After clearing the cache, we should miss again
        c1_meta, c2_meta = run_multicall()
        self.assertFalse(c1_meta.cache_hit)
        self.assertFalse(c2_meta.cache_hit)
        self.assertEqual(c2_meta.result['id'],
                         70115)  # The numeric id for the tag should not change
Example 5
def is_image_older_than_package_build_tagging(image_meta, image_build_event_id,
                                              package_build,
                                              newest_image_event_ts,
                                              oldest_image_event_ts):
    """
    Determines if a given rpm is part of a package that has been tagged in a relevant tag AFTER image_build_event_id
    :param image_meta: The image meta object for the image.
    :param image_build_event_id: The build event for the image.
    :param package_build: The package build to assess.
    :param newest_image_event_ts: The build timestamp of the most recently built image in this group.
    :param oldest_image_event_ts: The build timestamp of the oldest build in this group from getLatestBuild of each component.
    :return: (<bool>, message). If True, the message will describe the change reason. If False, message
            will be None.
    """

    # If you are considering changing this code, you are going to have to contend with
    # complex scenarios like: what if we pulled in this RPM by tagging it, intentionally
    # backlevel, from another product, and we want to keep it like that? Or, what if we tagged
    # in a non-released version of another product to get a pre-release fix for that package,
    # but subsequently want to inherit the later versions the original product ships?
    # This blunt approach isn't trying to be perfect, but it will rarely do an unnecessary
    # rebuild and handles those complex scenarios by erring on the side of doing the rebuild.

    runtime = image_meta.runtime

    with runtime.pooled_koji_client_session() as koji_api:
        package_build_id = package_build['build_id']
        package_name = package_build['package_name']

        # At the time this NEWEST image in the group was built, what were the active tags by which our image may have
        # pulled in the build in question.
        # We could make this simple by looking for  beforeEvent=image_build_event_id , but this would require
        # a new, relatively large koji API call for each image. By using  newest_image_event_ts,
        # we have a single, cached call which can be used for all subsequent analysis of this package.
        possible_active_tag_events = koji_api.queryHistory(
            brew.KojiWrapperOpts(caching=True),
            table='tag_listing',
            build=package_build_id,
            active=True,
            before=newest_image_event_ts)['tag_listing']

        # Now filter down the list to just the tags which might have contributed to our image build.
        contemporary_active_tag_names = set()
        for event in possible_active_tag_events:
            tag_name = event['tag.name']
            # There are some tags that are guaranteed not to be the way our image found the package.
            # Exclude them from the list of relevant tags.
            if tag_name == 'trashcan' or '-private' in tag_name or 'deleted' in tag_name:
                continue
            if tag_name.endswith(('-released', '-set', '-pending', '-backup')):
                # Ignore errata tags (e.g. RHBA-2020:2309-released, RHBA-2020:3027-pending) and tags like rhel-8.0.0-z-batch-0.3-set
                continue
            if tag_name.endswith(('-candidate', '-build')):
                # Eliminate candidate tags (we will add this image's -candidate tag back in below)
                continue
            # Finally, see if the event happened after THIS image was created
            if event['create_event'] < image_build_event_id:
                contemporary_active_tag_names.add(tag_name)

        image_meta.logger.info(
            f'Checking for tagging changes for {package_name} which the old build may have received through: {contemporary_active_tag_names}'
        )

        # Given an RPM X with a history of tags {x}, we know we received & installed
        # the RPM through one of the tags in {x}. What happens if a new RPM Y had a set of tags {y} that
        # is fully disjoint from {x}? In short, it came in through a completely independent vector, but we
        # would still find it through that vector if a build was triggered.
        # This could happen if:
        # 1. group.yml repos are changed and Y would be pulled from a new source
        # 2. The new Y is available through a different repo than we found it in last time.
        # To mitigate #1, we should force build after changing group.yml repos.
        # For #2, the only way this would typically happen would be if we were pulling from an official
        # repo for rpm X and then Y was tagged into our candidate tag as an override. To account for
        # this, always check for changes in our tags.
        contemporary_active_tag_names.add(image_meta.branch())
        contemporary_active_tag_names.add(image_meta.candidate_brew_tag())

        # Now let's look for tags that were applied to this package AFTER the oldest image in the group.
        # We could make this simple by looking for  afterEvent=image_build_event_id , but this would require
        # a new, relatively large koji API call for each image. By looking all the way back to  oldest_image_event_ts,
        # we have a single, cached call which can be used for all subsequent analysis of this package.
        active_tag_events = koji_api.queryHistory(
            brew.KojiWrapperOpts(caching=True),
            table='tag_listing',
            package=package_name,
            active=True,
            after=oldest_image_event_ts)['tag_listing']

        subsequent_active_tag_names = set()
        for event in active_tag_events:
            # See if the event happened after THIS image was created
            if event['create_event'] > image_build_event_id:
                subsequent_active_tag_names.add(event['tag.name'])

        image_meta.logger.info(
            f'Checking for tagging changes for {package_name} where tags have been modified since build: {subsequent_active_tag_names}'
        )

        # Here's the magic, hopefully. If the tags active when the image was built intersect the tag names modified since,
        # we know that a tag that may have delivered a build into our image has subsequently been updated to point to
        # a different build of that package. This means, if we build again, we MIGHT pull in that newly
        # tagged package.
        intersection_set = subsequent_active_tag_names.intersection(
            contemporary_active_tag_names)

        if intersection_set:
            return True, f'Package {package_name} has been retagged by potentially relevant tags since image build: {intersection_set}'

    return False, None
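A minimal sketch of invoking the check above for a single package build, outside the parallel_exec fan-out shown in the next example; every argument is a placeholder for values gathered the same way does_image_need_change gathers them.

changed, reason = is_image_older_than_package_build_tagging(
    image_meta, image_build_event_id, package_build,
    newest_image_event_ts, oldest_image_event_ts)
if changed:
    image_meta.logger.info(f'Rebuild likely required: {reason}')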
Example 6
    def does_image_need_change(self,
                               changing_rpm_packages=[],
                               buildroot_tag=None,
                               newest_image_event_ts=None,
                               oldest_image_event_ts=None):
        """
        Answers the question of whether the latest built image needs to be rebuilt because
        the packages (and therefore RPMs) it depends on might have changed in tags
        relevant to the image. A check is also made whether the image depends on a package
        we know is changing because we are about to rebuild it.
        :param changing_rpm_packages: A list of package names that are about to change.
        :param buildroot_tag: The build root for this image
        :param newest_image_event_ts: The build timestamp of the most recently built image in this group.
        :param oldest_image_event_ts: The build timestamp of the oldest build in this group from getLatestBuild of each component.
        :return: (meta, <bool>, message). If True, the image might need to be rebuilt -- the message will say
                why. If False, message will be None.
        """

        dgk = self.distgit_key
        runtime = self.runtime

        builds_contained_in_archives = {
        }  # build_id => result of koji.getBuild(build_id)
        with runtime.pooled_koji_client_session() as koji_api:

            image_build = self.get_latest_build(default='')
            if not image_build:
                # Seems this has never been built. Mark it as needing change.
                return self, True, 'Image has never been built before'

            self.logger.debug(f'Image {dgk} latest is {image_build}')

            image_nvr = image_build['nvr']
            image_build_event_id = image_build[
                'creation_event_id']  # the brew event that created this build

            self.logger.info(
                f'Running a change assessment on {image_nvr} built at event {image_build_event_id}'
            )

            # Very rarely, an image might need to pull a package that is not actually installed in the
            # builder image or in the final image.
            # e.g. https://github.com/openshift/ironic-ipa-downloader/blob/999c80f17472d5dbbd4775d901e1be026b239652/Dockerfile.ocp#L11-L14
            # This is programmatically undetectable through koji queries. So we allow extra scan-sources hints to
            # be placed in the image metadata.
            if self.config.scan_sources.extra_packages is not Missing:
                for package_details in self.config.scan_sources.extra_packages:
                    extra_package_name = package_details.name
                    extra_package_brew_tag = package_details.tag
                    # Example output: https://gist.github.com/jupierce/3bbc8be7265348a8f549d401664c9972
                    extra_latest_tagging_infos = koji_api.queryHistory(
                        table='tag_listing',
                        tag=extra_package_brew_tag,
                        package=extra_package_name,
                        active=True)['tag_listing']

                    if not extra_latest_tagging_infos:
                        raise IOError(
                            f'{dgk} unable to find tagging event for extra_packages {extra_package_name} in tag {extra_package_brew_tag}'
                        )

                    # Otherwise, we have information about the most recent time this package was tagged into the
                    # relevant tag. Why the tagging event and not the build time? Well, the build could have been
                    # made long ago, but only tagged into the relevant tag recently.
                    extra_latest_tagging_event = extra_latest_tagging_infos[0][
                        'create_event']
                    self.logger.debug(
                        f'Checking image creation time against extra_packages {extra_package_name} in tag {extra_package_brew_tag} @ tagging event {extra_latest_tagging_event}'
                    )
                    if extra_latest_tagging_event > image_build_event_id:
                        return self, True, f'Image {dgk} is sensitive to extra_packages {extra_package_name} which changed at event {extra_latest_tagging_event}'

            # Collect build times from any parent/builder images used to create this image
            builders = list(self.config['from'].builder) or []
            builders.append(
                self.config['from'])  # Add the parent image to the builders
            for builder in builders:
                if builder.member:
                    # We can't determine if images are about to change. Defer to scan-sources.
                    continue

                if builder.image:
                    builder_image_name = builder.image
                elif builder.stream:
                    builder_image_name = runtime.resolve_stream(
                        builder.stream).image
                else:
                    raise IOError(
                        f'Unable to determine builder or parent image pullspec from {builder}'
                    )

                # builder_image_name example: "openshift/ose-base:ubi8"
                brew_image_url = self.runtime.resolve_brew_image_url(
                    builder_image_name)
                builder_brew_build = ImageMetadata.builder_image_builds.get(
                    brew_image_url, None)

                if not builder_brew_build:
                    out, err = exectools.cmd_assert(
                        f'oc image info {brew_image_url} --filter-by-os amd64 -o=json',
                        retries=5,
                        pollrate=10)
                    latest_builder_image_info = Model(
                        json.loads(out, encoding='utf-8'))
                    builder_info_labels = latest_builder_image_info.config.config.Labels
                    builder_nvr_list = [
                        builder_info_labels['com.redhat.component'],
                        builder_info_labels['version'],
                        builder_info_labels['release']
                    ]

                    if not all(builder_nvr_list):
                        raise IOError(
                            f'Unable to find nvr in {builder_info_labels}')

                    builder_image_nvr = '-'.join(builder_nvr_list)
                    builder_brew_build = koji_api.getBuild(builder_image_nvr)
                    ImageMetadata.builder_image_builds[
                        brew_image_url] = builder_brew_build
                    self.logger.debug(
                        f'Found that builder or parent image {brew_image_url} has event {builder_brew_build}'
                    )

                if image_build_event_id < builder_brew_build[
                        'creation_event_id']:
                    self.logger.info(
                        f'will be rebuilt because a builder or parent image changed: {builder_image_name}'
                    )
                    return self, True, f'A builder or parent image {builder_image_name} has changed since {image_nvr} was built'

            build_root_change = brew.has_tag_changed_since_build(runtime,
                                                                 koji_api,
                                                                 image_build,
                                                                 buildroot_tag,
                                                                 inherit=True)
            if build_root_change:
                self.logger.info(
                    f'Image will be rebuilt due to buildroot change since {image_nvr} (last build event={image_build_event_id}). Build root change: [{build_root_change}]'
                )
                return self, True, f'Buildroot tag changes since {image_nvr} was built'

            archives = koji_api.listArchives(image_build['id'])

            # Compare to the arches in runtime
            build_arches = set()
            for a in archives:
                # When running with cachito, not all archives returned are images. Filter out non-images.
                if a['btype'] == 'image':
                    build_arches.add(a['extra']['image']['arch'])

            target_arches = set(self.get_arches())
            if target_arches != build_arches:
                # The latest brew build does not exactly match the required arches as specified in group.yml
                return self, True, f'Arches of {image_nvr}: ({build_arches}) do not match target arches {target_arches}'

            for archive in archives:
                # Example results of listing RPMs in a given imageID:
                # https://gist.github.com/jupierce/a8798858104dcf6dfa4bd1d6dd99d2d8
                archive_id = archive['id']
                rpm_entries = koji_api.listRPMs(imageID=archive_id)
                for rpm_entry in rpm_entries:
                    build_id = rpm_entry['build_id']
                    build = koji_api.getBuild(
                        build_id, brew.KojiWrapperOpts(caching=True))
                    package_name = build['package_name']
                    if package_name in changing_rpm_packages:
                        return self, True, f'Image includes {package_name} which is also about to change'
                    # Several RPMs may belong to the same package, and each archive must use the same
                    # build of a package, so all we need to collect is the set of build_ids for the packages
                    # across all of the archives.
                    builds_contained_in_archives[build_id] = build

        self.logger.info(
            f'Checking whether any of the {len(builds_contained_in_archives)} installed builds have been tagged by a relevant tag since this image\'s build brew event {image_build_event_id}'
        )

        installed_builds = list(builds_contained_in_archives.values())
        # Shuffle the builds before starting the threads. The reason is that multiple images are going to be performing
        # these queries simultaneously. Those images have similar packages (typically rooted in a RHEL base image).
        # The KojiWrapper caching mechanism will allow two simultaneous calls to a Koji API to hit the actual
        # server since no result has yet been returned. Shuffling the installed package list spreads the threads
        # out among the packages to reduce re-work by the server.
        random.shuffle(installed_builds)
        changes_res = runtime.parallel_exec(
            f=lambda installed_package_build,
            terminate_event: is_image_older_than_package_build_tagging(
                self, image_build_event_id, installed_package_build,
                newest_image_event_ts, oldest_image_event_ts),
            args=installed_builds,
            n_threads=10)

        for changed, msg in changes_res.get():
            if changed:
                return self, True, msg

        return self, False, None
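A minimal, hypothetical sketch of driving the assessment above for one image; the package list, buildroot tag, and timestamps are placeholders, not values from the source.

meta, changed, msg = image_meta.does_image_need_change(
    changing_rpm_packages=['openssl'],
    buildroot_tag='rhaos-4.7-rhel-8-build',
    newest_image_event_ts=newest_image_event_ts,
    oldest_image_event_ts=oldest_image_event_ts)
if changed:
    meta.logger.info(f'{meta.distgit_key} needs a rebuild: {msg}')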