Example #1
def assembly_metadata_config(releases_config: Model,
                             assembly: typing.Optional[str], meta_type: str,
                             distgit_key: str, meta_config: Model) -> Model:
    """
    Returns a group member's metadata configuration based on the assembly information
    and the initial file-based config.
    :param releases_config: A Model for releases.yaml.
    :param assembly: The name of the assembly
    :param meta_type: 'rpm' or 'image'
    :param distgit_key: The member's distgit_key
    :param meta_config: The meta's config object
    :return: A computed config for the metadata (e.g. the value for meta.config).
    """
    if not assembly or not isinstance(releases_config, Model):
        return meta_config

    _check_recursion(releases_config, assembly)
    target_assembly = releases_config.releases[assembly].assembly

    if target_assembly.basis.assembly:  # Does this assembly inherit from another?
        # Recursively apply ancestor assemblies
        meta_config = assembly_metadata_config(releases_config,
                                               target_assembly.basis.assembly,
                                               meta_type, distgit_key,
                                               meta_config)

    config_dict = meta_config.primitive()

    component_list = target_assembly.members[f'{meta_type}s']
    for component_entry in component_list:
        if (component_entry.distgit_key == '*' or component_entry.distgit_key == distgit_key) and component_entry.metadata:
            config_dict = merger(component_entry.metadata.primitive(),
                                 config_dict)

    return Model(dict_to_model=config_dict)
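
A minimal usage sketch for the resolver above (hedged: the releases.yml path, the base config values, and the assembly name are illustrative, mirroring the fixture in Example #18 below):

import yaml

with open('releases.yml') as f:
    releases_config = Model(dict_to_model=yaml.safe_load(f))

# Start from the file-based metadata for one rpm member...
base_config = Model(dict_to_model={'name': 'openshift-kuryr'})
# ...and compute the effective config for the 'ART_1' assembly.
effective = assembly_metadata_config(releases_config, 'ART_1', 'rpm',
                                     'openshift-kuryr', base_config)
# Member overrides declared by ART_1 (or its ancestors) are now merged in.
print(effective.content.source.git.branch.target)  # '1_hash' with the Example #18 fixture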
Example #2
    def test_latest_container(self, meta_mock, id_mock):
        # "normal" lookup
        id_mock.return_value = "dummy"
        meta_mock.return_value = dict(
            oscontainer=dict(image="test", digest="sha256:1234abcd"))
        self.assertEqual(("dummy", "test@sha256:1234abcd"),
                         rhcos.RHCOSBuildFinder(self.runtime,
                                                "4.4").latest_container())

        # lookup when there is no build to look up
        id_mock.return_value = None
        self.assertEqual((None, None),
                         rhcos.RHCOSBuildFinder(self.runtime,
                                                "4.4").latest_container())

        # lookup when we have configured a different primary container
        self.runtime.group_config.rhcos = Model(
            dict(payload_tags=[
                dict(name="spam"),
                dict(name="eggs", primary=True)
            ]))
        id_mock.return_value = "dummy"
        meta_mock.return_value = dict(
            oscontainer=dict(image="test", digest="sha256:1234abcdstandard"),
            altcontainer=dict(image="test", digest="sha256:abcd1234alt"),
        )
        alt_container = dict(name="rhel-coreos-8",
                             build_metadata_key="altcontainer",
                             primary=True)
        self.runtime.group_config.rhcos = Model(
            dict(payload_tags=[alt_container]))
        self.assertEqual(("dummy", "test@sha256:abcd1234alt"),
                         rhcos.RHCOSBuildFinder(self.runtime,
                                                "4.4").latest_container())
Example #3
def assembly_streams_config(releases_config: Model,
                            assembly: typing.Optional[str],
                            streams_config: Model) -> Model:
    """
    Returns a streams config based on the assembly information
    and the input group config.
    :param releases_config: A Model for releases.yaml.
    :param assembly: The name of the assembly
    :param streams_config: The streams config to merge into a new streams config (original Model will not be altered)
    """
    if not assembly or not isinstance(releases_config, Model):
        return streams_config

    _check_recursion(releases_config, assembly)
    target_assembly = releases_config.releases[assembly].assembly

    if target_assembly.basis.assembly:  # Does this assembly inherit from another?
        # Recursively apply ancestor assemblies
        streams_config = assembly_streams_config(
            releases_config, target_assembly.basis.assembly, streams_config)

    target_assembly_streams = target_assembly.group
    if not target_assembly_streams:
        return streams_config

    return Model(dict_to_model=merger(target_assembly_streams.primitive(),
                                      streams_config.primitive()))
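
The same inheritance walk applies here: whatever an assembly declares under its 'group' key is merged over the file-based streams config. A hedged sketch, reusing releases_config from the sketch under Example #1 (stream names and values are illustrative):

streams = Model(dict_to_model={'golang': {'image': 'openshift/golang-builder:v1.15'}})
resolved = assembly_streams_config(releases_config, 'ART_1', streams)
# If ART_1 or an ancestor overrides 'golang' under its 'group' key, that value
# wins; otherwise the input streams config is returned unchanged.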
Example #4
    def setUp(self):
        self.runtime = MagicMock(group_config=Model(
            dict(
                arches=["x86_64", "s390x", "ppc64le", "aarch64"],
                multi_arch=dict(enabled=True),
            )))
        subject.image_info_cache = {}
Example #5
    def __init__(self, runtime, pullspec_for_tag: Dict[str, str], brew_arch: str):
        self.runtime = runtime
        self.brew_arch = brew_arch
        self.pullspec_for_tag = pullspec_for_tag
        self.build_id = None

        # Remember the pullspec(s) provided in case they do not match what is in the releases.yaml.
        # Because of an incident where we needed to repush RHCOS and get a new SHA for 4.10 GA,
        # we trust the exact pullspec in releases.yml instead of what we find in the RHCOS release
        # browser.
        for tag, pullspec in pullspec_for_tag.items():
            image_info_str, _ = exectools.cmd_assert(f'oc image info -o json {pullspec}', retries=3)
            image_info = Model(json.loads(image_info_str))
            build_id = image_info.config.config.Labels.version
            if not build_id:
                raise Exception(f'Unable to determine RHCOS build_id from tag {tag} pullspec {pullspec}. Retrieved image info: {image_info_str}')
            if self.build_id and self.build_id != build_id:
                raise Exception(f'Found divergent RHCOS build_id for {pullspec_for_tag}. {build_id} versus {self.build_id}')
            self.build_id = build_id

        # The first digits of the RHCOS build are the major.minor of the rhcos stream name.
        # Which, near branch cut, might not match the actual release stream.
        # Sadly we don't have any other labels or anything to look at to determine the stream.
        version = self.build_id.split('.')[0]
        self.stream_version = version[0] + '.' + version[1:]  # e.g. 43.82.202102081639.0 -> "4.3"

        try:
            finder = RHCOSBuildFinder(runtime, self.stream_version, self.brew_arch)
            self._build_meta = finder.rhcos_build_meta(self.build_id, meta_type='meta')
            self._os_commitmeta = finder.rhcos_build_meta(self.build_id, meta_type='commitmeta')
        except Exception:
            # Fall back to trying to find a custom build
            finder = RHCOSBuildFinder(runtime, self.stream_version, self.brew_arch, custom=True)
            self._build_meta = finder.rhcos_build_meta(self.build_id, meta_type='meta')
            self._os_commitmeta = finder.rhcos_build_meta(self.build_id, meta_type='commitmeta')
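
A hedged construction sketch (the pullspec digest is a placeholder; runtime is assumed to be a doozer-style runtime object):

pullspecs = {'machine-os-content': 'quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:...'}
inspector = RHCOSBuildInspector(runtime, pullspecs, 'x86_64')
# e.g. build_id '43.82.202102081639.0' -> stream_version '4.3'
print(inspector.build_id, inspector.stream_version)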
Example #6
def _assembly_config_struct(releases_config: Model,
                            assembly: typing.Optional[str], key: str, default):
    """
    If a key is directly under the 'assembly' (e.g. rhcos), then this method will
    recurse the inheritance tree to build the final version of that key's value.
    The key may refer to a list or dict (set the default value appropriately).
    """
    if not assembly or not isinstance(releases_config, Model):
        return Missing

    _check_recursion(releases_config, assembly)
    target_assembly = releases_config.releases[assembly].assembly
    key_struct = target_assembly.get(key, default)
    if target_assembly.basis.assembly:  # Does this assembly inherit from another?
        # Recursively apply ancestor assemblies
        parent_config_struct = _assembly_config_struct(
            releases_config, target_assembly.basis.assembly, key, default)
        key_struct = merger(key_struct, parent_config_struct.primitive())
    if isinstance(default, dict):
        return Model(dict_to_model=key_struct)
    elif isinstance(default, list):
        return ListModel(list_to_model=key_struct)
    else:
        raise ValueError(
            f'Unknown how to derive for default type: {type(default)}')
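
A hedged usage sketch against the releases.yml fixture in Example #18 (assembly and key names come from that fixture):

# Resolve the effective 'rhcos' struct for ART_8; a dict default yields a Model.
rhcos_struct = _assembly_config_struct(releases_config, 'ART_8', 'rhcos', {})
# ART_8 inherits ART_7's rhcos entry, so the inherited 'dependencies' rpms are
# merged with ART_8's own according to the merger semantics.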
Example #7
    def test_release_url(self):
        self.assertIn(
            "4.6-s390x",
            rhcos.RHCOSBuildFinder(self.runtime, "4.6",
                                   "s390x").rhcos_release_url())
        self.assertNotIn(
            "x86_64",
            rhcos.RHCOSBuildFinder(self.runtime, "4.6",
                                   "x86_64").rhcos_release_url())
        self.assertIn(
            "4.9-aarch64",
            rhcos.RHCOSBuildFinder(self.runtime, "4.9",
                                   "aarch64").rhcos_release_url())

        self.runtime.group_config.urls = Model(
            dict(rhcos_release_base=dict(
                aarch64="https//example.com/storage/releases/rhcos-4.x-aarch64"
            )))
        self.assertIn(
            "4.x-aarch64",
            rhcos.RHCOSBuildFinder(self.runtime, "4.9",
                                   "aarch64").rhcos_release_url())
        self.assertIn(
            "4.9-s390x",
            rhcos.RHCOSBuildFinder(self.runtime, "4.9",
                                   "s390x").rhcos_release_url())
Example #8
    async def retrieve_image_info_async(self, pullspec: str) -> Model:
        """pull/cache/return json info for a container pullspec (enable concurrency)"""
        if pullspec not in image_info_cache:
            image_json_str, _ = await exectools.cmd_assert_async(
                f"oc image info {pullspec} -o=json --filter-by-os=amd64",
                retries=3)
            image_info_cache[pullspec] = Model(json.loads(image_json_str))
        return image_info_cache[pullspec]
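
A hedged concurrency sketch (the pullspecs are placeholders; assumes the caller runs inside an asyncio event loop):

import asyncio

async def demo(inspector):
    # Fetch image info for two pullspecs concurrently; repeat lookups hit the cache.
    a, b = await asyncio.gather(
        inspector.retrieve_image_info_async('quay.io/example/a@sha256:...'),
        inspector.retrieve_image_info_async('quay.io/example/b@sha256:...'),
    )
    return a.config.config.Labels.version, b.config.config.Labels.version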
Example #9
    def test_retrieve_nvr_for_tag(self, mock_rii):
        mock_rii.return_value = Model(
            dict(config=dict(config=dict(
                Labels={
                    "com.redhat.component": "spam",
                    "version": "1.0",
                    "release": "1.el8",
                }))))
        nightly = self.vanilla_nightly()
        self.assertEqual(("spam", "1.0", "1.el8"),
                         nightly.retrieve_nvr_for_tag("pod"))

        mock_rii.return_value = Exception()  # should be cached from last call
        self.assertEqual(("spam", "1.0", "1.el8"),
                         nightly.retrieve_nvr_for_tag("pod"))

        mock_rii.return_value = Model()  # no labels provided
        nightly.pullspec_for_tag["rhcos"] = "rhcos_ps"
        self.assertIsNone(nightly.retrieve_nvr_for_tag("rhcos"))
Example #10
    def test_find_rhcos_payload_entries(self):
        rhcos_build = MagicMock()
        assembly_inspector = MagicMock()
        assembly_inspector.get_rhcos_build.return_value = rhcos_build
        rhcos_build.get_container_configs.return_value = [
            Model(dict(name="spam", build_metadata_tag="eggs", primary=True)),
            Model(dict(name="foo", build_metadata_tag="bar")),
        ]

        # test when a primary container is missing from rhcos build
        rhcos_build.get_container_pullspec.side_effect = [
            rhcos.RhcosMissingContainerException("primary missing"),
            "somereg/somerepo@sha256:somesum",
        ]
        rhcos_entries, issues = rgp_cli.PayloadGenerator._find_rhcos_payload_entries(
            assembly_inspector, "arch")
        self.assertNotIn("spam", rhcos_entries)
        self.assertIn("foo", rhcos_entries)
        self.assertEqual(issues[0].code, AssemblyIssueCode.IMPERMISSIBLE)

        # test when a non-primary container is missing from rhcos build
        rhcos_build.get_container_pullspec.side_effect = [
            "somereg/somerepo@sha256:somesum",
            rhcos.RhcosMissingContainerException("non-primary missing"),
        ]
        rhcos_entries, issues = rgp_cli.PayloadGenerator._find_rhcos_payload_entries(
            assembly_inspector, "arch")
        self.assertIn("spam", rhcos_entries)
        self.assertNotIn("foo", rhcos_entries)
        self.assertEqual(issues[0].code,
                         AssemblyIssueCode.MISSING_RHCOS_CONTAINER)

        # test when no container is missing from rhcos build
        rhcos_build.get_container_pullspec.side_effect = [
            "somereg/somerepo@sha256:somesum",
            "somereg/somerepo@sha256:someothersum",
        ]
        rhcos_entries, issues = rgp_cli.PayloadGenerator._find_rhcos_payload_entries(
            assembly_inspector, "arch")
        self.assertEqual([], issues)
        self.assertEqual(2, len(rhcos_entries))
Example #11
    def test_inspector_get_container_pullspec(self, rhcos_build_meta_mock,
                                              cmd_assert_mock):
        # mock out the things RHCOSBuildInspector calls in __init__
        rhcos_meta = {"buildid": "412.86.bogus"}
        rhcos_commitmeta = {}
        rhcos_build_meta_mock.side_effect = [rhcos_meta, rhcos_commitmeta]
        cmd_assert_mock.return_value = (
            '{"config": {"config": {"Labels": {"version": "412.86.bogus"}}}}',
            None)
        pullspecs = {'machine-os-content': 'spam@eggs'}
        rhcos_build = rhcos.RHCOSBuildInspector(self.runtime, pullspecs,
                                                's390x')

        # test its behavior on misconfiguration / edge case
        container_conf = dict(name='spam', build_metadata_key='eggs')
        with self.assertRaises(rhcos.RhcosMissingContainerException):
            rhcos_build.get_container_pullspec(Model(container_conf))
Example #12
    def test_detect_rhcos_status(self, mock_latest, mock_tagged):
        mock_tagged.return_value = "id-1"
        mock_latest.return_value = "id-2"
        runtime = MagicMock(group_config=Model())
        runtime.get_minor_version.return_value = "4.2"
        runtime.arches = ['s390x']

        statuses = scan_sources._detect_rhcos_status(runtime,
                                                     kubeconfig="dummy")
        self.assertEqual(2, len(statuses),
                         "expect public and private status reported")
        self.assertTrue(all(s['changed'] for s in statuses),
                        "expect changed status reported")
        self.assertTrue(all("id-1" in s['reason'] for s in statuses),
                        "expect previous id in reason")
        self.assertTrue(all("id-2" in s['reason'] for s in statuses),
                        "expect changed id in reason")
Example #13
    def test_get_primary_container_conf(self):
        # default is same as it's always been
        self.assertEqual(
            "machine-os-content",
            rhcos.RHCOSBuildFinder(
                self.runtime, "4.6",
                "x86_64").get_primary_container_conf()["name"])

        # but we can configure a different primary
        self.runtime.group_config.rhcos = Model(
            dict(payload_tags=[
                dict(name="spam"),
                dict(name="eggs", primary=True)
            ]))
        self.assertEqual(
            "eggs",
            rhcos.RHCOSBuildFinder(
                self.runtime, "4.6",
                "x86_64").get_primary_container_conf()["name"])
Example #14
    def test_determine_arch_list(self):
        self.assertEqual(
            # {"aarch64", "x86_64", "multi"},  # no multi yet
            {"aarch64", "x86_64"},
            set(subject.determine_arch_list(self.runtime,
                                            ["s390x", "ppc64le"])))

        runtime = MagicMock(
            group_config=Model(dict(arches=["x86_64", "aarch64"])))
        with self.assertRaises(
                ValueError,
                msg="should fail when specifying non-configured arch"):
            subject.determine_arch_list(runtime, ["bogus"])
        # with self.assertRaises(ValueError, msg="should fail when specifying multi if not configured"):
        #     subject.determine_arch_list(runtime, ["multi"])  # no multi yet

        self.assertEqual({"aarch64"},
                         subject.determine_arch_list(runtime, {"x86_64"}))
        self.assertEqual({"x86_64", "aarch64"},
                         subject.determine_arch_list(runtime, set()))
Example #15
    def __init__(self, runtime, pullspec_or_build_id: str, brew_arch: str):
        self.runtime = runtime
        self.brew_arch = brew_arch

        if pullspec_or_build_id[0].isdigit():
            self.build_id = pullspec_or_build_id
        else:
            pullspec = pullspec_or_build_id
            image_info_str, _ = exectools.cmd_assert(
                f'oc image info -o json {pullspec}', retries=3)
            image_info = Model(dict_to_model=json.loads(image_info_str))
            self.build_id = image_info.config.config.Labels.version
            if not self.build_id:
                raise Exception(
                    f'Unable to determine MOSC build_id from: {pullspec}. Retrieved image info: {image_info_str}'
                )

        # The first digits of the RHCOS build are the major.minor of the rhcos stream name.
        # Which, near branch cut, might not match the actual release stream.
        # Sadly we don't have any other labels or anything to look at to determine the stream.
        version = self.build_id.split('.')[0]
        self.stream_version = version[0] + '.' + version[1:]  # e.g. 43.82.202102081639.0 -> "4.3"

        try:
            finder = RHCOSBuildFinder(runtime, self.stream_version,
                                      self.brew_arch)
            self._build_meta = finder.rhcos_build_meta(self.build_id,
                                                       meta_type='meta')
            self._os_commitmeta = finder.rhcos_build_meta(
                self.build_id, meta_type='commitmeta')
        except Exception:
            # Fall back to trying to find a custom build
            finder = RHCOSBuildFinder(runtime,
                                      self.stream_version,
                                      self.brew_arch,
                                      custom=True)
            self._build_meta = finder.rhcos_build_meta(self.build_id,
                                                       meta_type='meta')
            self._os_commitmeta = finder.rhcos_build_meta(
                self.build_id, meta_type='commitmeta')
Example #16
    def test_assembly_metadata_config(self):

        meta_config = Model(
            dict_to_model={
                'owners': ['*****@*****.**'],
                'content': {
                    'source': {
                        'git': {
                            'url':
                            '[email protected]:openshift-priv/kuryr-kubernetes.git',
                            'branch': {
                                'target': 'release-4.8',
                            }
                        },
                        'specfile': 'openshift-kuryr-kubernetes-rhel8.spec'
                    }
                },
                'name': 'openshift-kuryr'
            })

        config = assembly_metadata_config(self.releases_config, 'ART_1', 'rpm',
                                          'openshift-kuryr', meta_config)
        # Ensure no loss
        self.assertEqual(config.name, 'openshift-kuryr')
        self.assertEqual(len(config.owners), 1)
        self.assertEqual(config.owners[0], '*****@*****.**')
        # Check that things were overridden
        self.assertEqual(config.content.source.git.url,
                         '[email protected]:jupierce/kuryr-kubernetes.git')
        self.assertEqual(config.content.source.git.branch.target, '1_hash')

        config = assembly_metadata_config(self.releases_config, 'ART_5', 'rpm',
                                          'openshift-kuryr', meta_config)
        # Ensure no loss
        self.assertEqual(config.name, 'openshift-kuryr')
        self.assertEqual(len(config.owners), 1)
        self.assertEqual(config.owners[0], '*****@*****.**')
        # Check that things were overridden
        self.assertEqual(config.content.source.git.url,
                         '[email protected]:jupierce/kuryr-kubernetes.git')
        self.assertEqual(config.content.source.git.branch.target, '2_hash')

        config = assembly_metadata_config(self.releases_config, 'ART_6', 'rpm',
                                          'openshift-kuryr', meta_config)
        # Ensure no loss
        self.assertEqual(config.name, 'openshift-kuryr')
        self.assertEqual(len(config.owners), 1)
        self.assertEqual(config.owners[0], '*****@*****.**')
        # Check that things were overridden. 6 changes branches for all rpms
        self.assertEqual(config.content.source.git.url,
                         '[email protected]:jupierce/kuryr-kubernetes.git')
        self.assertEqual(config.content.source.git.branch.target, 'customer_6')

        config = assembly_metadata_config(self.releases_config, 'ART_8',
                                          'image', 'openshift-kuryr',
                                          meta_config)
        # Ensure no loss
        self.assertEqual(config.name, 'openshift-kuryr')
        self.assertEqual(config.content.source.git.url,
                         '[email protected]:jupierce/kuryr-kubernetes.git')
        self.assertEqual(config.content.source.git.branch.target, '1_hash')
        # Ensure that 'is' comes from ART_8 and not ART_7
        self.assertEqual(config['is'], 'kuryr-nvr2')
        # Ensure that 'dependencies' were accumulated
        self.assertEqual(len(config.dependencies.rpms), 2)

        try:
            assembly_metadata_config(self.releases_config, 'ART_INFINITE',
                                     'rpm', 'openshift-kuryr', meta_config)
            self.fail('Expected ValueError on assembly infinite recursion')
        except ValueError:
            pass
        except Exception as e:
            self.fail(
                f'Expected ValueError on assembly infinite recursion but got: {type(e)}: {e}'
            )
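
The try/except pattern above works, but the same intent reads more directly with assertRaises; a hedged equivalent:

with self.assertRaises(ValueError):
    assembly_metadata_config(self.releases_config, 'ART_INFINITE', 'rpm',
                             'openshift-kuryr', meta_config)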
Example #17
    def test_assembly_group_config(self):

        group_config = Model(dict_to_model={
            'arches': ['x86_64'],
            'advisories': {
                'image': 1,
                'extras': 1,
            }
        })

        config = assembly_group_config(self.releases_config, 'ART_1',
                                       group_config)
        self.assertEqual(len(config.arches), 3)

        config = assembly_group_config(self.releases_config, 'ART_2',
                                       group_config)
        self.assertEqual(len(config.arches), 2)

        # 3 inherits from 2 and only overrides the advisory value
        config = assembly_group_config(self.releases_config, 'ART_3',
                                       group_config)
        self.assertEqual(len(config.arches), 2)
        self.assertEqual(config.advisories.image, 31)
        self.assertEqual(
            config.advisories.extras,
            1)  # ART_3 does not override extras, so it should come from group_config

        # 4 inherits from 3, but sets "advisories!"
        config = assembly_group_config(self.releases_config, 'ART_4',
                                       group_config)
        self.assertEqual(len(config.arches), 2)
        self.assertEqual(config.advisories.image, 41)
        self.assertEqual(config.advisories.extras, Missing)

        # 5 inherits from 4, but sets "advisories!" (overriding 4's !) and "arches!"
        config = assembly_group_config(self.releases_config, 'ART_5',
                                       group_config)
        self.assertEqual(len(config.arches), 1)
        self.assertEqual(config.advisories.image, 51)

        config = assembly_group_config(self.releases_config, 'not_defined',
                                       group_config)
        self.assertEqual(len(config.arches), 1)

        config = assembly_group_config(self.releases_config, 'ART_7',
                                       group_config)
        self.assertEqual(len(config.dependencies.rpms), 1)

        config = assembly_group_config(self.releases_config, 'ART_8',
                                       group_config)
        self.assertEqual(len(config.dependencies.rpms), 2)

        try:
            assembly_group_config(self.releases_config, 'ART_INFINITE',
                                  group_config)
            self.fail('Expected ValueError on assembly infinite recursion')
        except ValueError:
            pass
        except Exception as e:
            self.fail(
                f'Expected ValueError on assembly infinite recursion but got: {type(e)}: {e}'
            )
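
The ART_4/ART_5 assertions above imply the merger's override semantics: a trailing '!' on a key replaces the inherited value wholesale instead of deep-merging into it. A hedged plain-dict illustration (assuming merger behaves as these tests imply):

base = {'advisories': {'image': 1, 'extras': 1}}
override = {'advisories!': {'image': 41}}
merged = merger(override, base)
# Expected per the ART_4 assertions: image is 41 and 'extras' is dropped.
assert merged['advisories'] == {'image': 41}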
Example #18
    def setUp(self) -> None:
        releases_yml = """
releases:
  ART_1:
    assembly:
      members:
        rpms:
        - distgit_key: openshift-kuryr
          metadata:  # changes to make to the metadata
            content:
              source:
                git:
                  url: [email protected]:jupierce/kuryr-kubernetes.git
                  branch:
                    target: 1_hash
      group:
        arches:
        - x86_64
        - ppc64le
        - s390x
        advisories:
          image: 11
          extras: 12

  ART_2:
    assembly:
      basis:
        brew_event: 5
      members:
        rpms:
        - distgit_key: openshift-kuryr
          metadata:  # changes to make to the metadata
            content:
              source:
                git:
                  url: [email protected]:jupierce/kuryr-kubernetes.git
                  branch:
                    target: 2_hash
      group:
        arches:
        - x86_64
        - s390x
        advisories:
          image: 21

  ART_3:
    assembly:
      basis:
        assembly: ART_2
      group:
        advisories:
          image: 31

  ART_4:
    assembly:
      basis:
        assembly: ART_3
      group:
        advisories!:
          image: 41

  ART_5:
    assembly:
      basis:
        assembly: ART_4
      group:
        arches!:
        - s390x
        advisories!:
          image: 51

  ART_6:
    assembly:
      basis:
        assembly: ART_5
      members:
        rpms:
        - distgit_key: '*'
          metadata:
            content:
              source:
                git:
                  branch:
                    target: customer_6

  ART_7:
    assembly:
      basis:
        brew_event: 5
      members:
        images:
        - distgit_key: openshift-kuryr
          metadata:
            content:
              source:
                git:
                  url: [email protected]:jupierce/kuryr-kubernetes.git
                  branch:
                    target: 1_hash
            is: kuryr-nvr
            dependencies:
              rpms:
              - el7: some-nvr-1
                non_gc_tag: some-tag-1
      group:
        dependencies:
          rpms:
            - el7: some-nvr-3
              non_gc_tag: some-tag-3
      rhcos:
        machine-os-content:
          images:
            x86_64: registry.example.com/rhcos-x86_64:test
        dependencies:
          rpms:
            - el7: some-nvr-4
              non_gc_tag: some-tag-4
            - el8: some-nvr-5
              non_gc_tag: some-tag-4

  ART_8:
    assembly:
      basis:
        assembly: ART_7
      members:
        images:
        - distgit_key: openshift-kuryr
          metadata:
            is: kuryr-nvr2
            dependencies:
              rpms:
              - el7: some-nvr-2
                non_gc_tag: some-tag-2
      group:
        dependencies:
          rpms:
            - el7: some-nvr-4
              non_gc_tag: some-tag-4
      rhcos:
        machine-os-content:
          images: {}
        dependencies:
          rpms:
            - el8: some-nvr-6
              non_gc_tag: some-tag-6

  ART_INFINITE:
    assembly:
      basis:
        assembly: ART_INFINITE
      members:
        rpms:
        - distgit_key: '*'
          metadata:
            content:
              source:
                git:
                  branch:
                    target: customer_6

"""
        self.releases_config = Model(
            dict_to_model=yaml.safe_load(releases_yml))
Example #19
    def does_image_need_change(self,
                               changing_rpm_packages=[],
                               buildroot_tag=None,
                               newest_image_event_ts=None,
                               oldest_image_event_ts=None):
        """
        Determines whether the latest built image needs to be rebuilt because the packages
        (and therefore RPMs) it depends on might have changed in tags relevant to the image.
        A check is also made for whether the image depends on a package we know is changing
        because we are about to rebuild it.
        :param changing_rpm_packages: A list of package names that are about to change.
        :param buildroot_tag: The buildroot tag for this image.
        :param newest_image_event_ts: The build timestamp of the most recently built image in this group.
        :param oldest_image_event_ts: The build timestamp of the oldest build in this group from getLatestBuild of each component.
        :return: (meta, <bool>, message). If True, the image might need to be rebuilt -- the message will say
                why. If False, message will be None.
        """

        dgk = self.distgit_key
        runtime = self.runtime

        builds_contained_in_archives = {}  # build_id => result of koji.getBuild(build_id)
        with runtime.pooled_koji_client_session() as koji_api:

            image_build = self.get_latest_build(default='')
            if not image_build:
                # Seems this has never been built. Mark it as needing change.
                return self, True, 'Image has never been built before'

            self.logger.debug(f'Image {dgk} latest is {image_build}')

            image_nvr = image_build['nvr']
            image_build_event_id = image_build['creation_event_id']  # the brew event that created this build

            self.logger.info(
                f'Running a change assessment on {image_nvr} built at event {image_build_event_id}'
            )

            # Very rarely, an image might need to pull a package that is not actually installed in the
            # builder image or in the final image.
            # e.g. https://github.com/openshift/ironic-ipa-downloader/blob/999c80f17472d5dbbd4775d901e1be026b239652/Dockerfile.ocp#L11-L14
            # This is programmatically undetectable through koji queries. So we allow extra scan-sources hints to
            # be placed in the image metadata.
            if self.config.scan_sources.extra_packages is not Missing:
                for package_details in self.config.scan_sources.extra_packages:
                    extra_package_name = package_details.name
                    extra_package_brew_tag = package_details.tag
                    # Example output: https://gist.github.com/jupierce/3bbc8be7265348a8f549d401664c9972
                    extra_latest_tagging_infos = koji_api.queryHistory(
                        table='tag_listing',
                        tag=extra_package_brew_tag,
                        package=extra_package_name,
                        active=True)['tag_listing']

                    if not extra_latest_tagging_infos:
                        raise IOError(
                            f'{dgk} unable to find tagging event for extra_packages {extra_package_name} in tag {extra_package_brew_tag}'
                        )

                    # Otherwise, we have information about the most recent time this package was tagged into the
                    # relevant tag. Why the tagging event and not the build time? Well, the build could have been
                    # made long ago, but only tagged into the relevant tag recently.
                    extra_latest_tagging_event = extra_latest_tagging_infos[0]['create_event']
                    self.logger.debug(
                        f'Checking image creation time against extra_packages {extra_package_name} in tag {extra_package_brew_tag} @ tagging event {extra_latest_tagging_event}'
                    )
                    if extra_latest_tagging_event > image_build_event_id:
                        return self, True, f'Image {dgk} is sensitive to extra_packages {extra_package_name} which changed at event {extra_latest_tagging_event}'

            # Collect build times from any parent/builder images used to create this image
            builders = list(self.config['from'].builder) or []
            builders.append(self.config['from'])  # Add the parent image to the builders
            for builder in builders:
                if builder.member:
                    # We can't determine if images are about to change. Defer to scan-sources.
                    continue

                if builder.image:
                    builder_image_name = builder.image
                elif builder.stream:
                    builder_image_name = runtime.resolve_stream(
                        builder.stream).image
                else:
                    raise IOError(
                        f'Unable to determine builder or parent image pullspec from {builder}'
                    )

                # builder_image_name example: "openshift/ose-base:ubi8"
                brew_image_url = self.runtime.resolve_brew_image_url(
                    builder_image_name)
                builder_brew_build = ImageMetadata.builder_image_builds.get(
                    brew_image_url, None)

                if not builder_brew_build:
                    out, err = exectools.cmd_assert(
                        f'oc image info {brew_image_url} --filter-by-os amd64 -o=json',
                        retries=5,
                        pollrate=10)
                    latest_builder_image_info = Model(json.loads(out))
                    builder_info_labels = latest_builder_image_info.config.config.Labels
                    builder_nvr_list = [
                        builder_info_labels['com.redhat.component'],
                        builder_info_labels['version'],
                        builder_info_labels['release']
                    ]

                    if not all(builder_nvr_list):
                        raise IOError(
                            f'Unable to find nvr in {builder_info_labels}')

                    builder_image_nvr = '-'.join(builder_nvr_list)
                    builder_brew_build = koji_api.getBuild(builder_image_nvr)
                    ImageMetadata.builder_image_builds[
                        brew_image_url] = builder_brew_build
                    self.logger.debug(
                        f'Found that builder or parent image {brew_image_url} has event {builder_brew_build}'
                    )

                if image_build_event_id < builder_brew_build[
                        'creation_event_id']:
                    self.logger.info(
                        f'will be rebuilt because a builder or parent image changed: {builder_image_name}'
                    )
                    return self, True, f'A builder or parent image {builder_image_name} has changed since {image_nvr} was built'

            build_root_change = brew.has_tag_changed_since_build(runtime,
                                                                 koji_api,
                                                                 image_build,
                                                                 buildroot_tag,
                                                                 inherit=True)
            if build_root_change:
                self.logger.info(
                    f'Image will be rebuilt due to buildroot change since {image_nvr} (last build event={image_build_event_id}). Build root change: [{build_root_change}]'
                )
                return self, True, f'Buildroot tag changes since {image_nvr} was built'

            archives = koji_api.listArchives(image_build['id'])

            # Compare to the arches in runtime
            build_arches = set()
            for a in archives:
                # When running with cachito, not all archives returned are images. Filter out non-images.
                if a['btype'] == 'image':
                    build_arches.add(a['extra']['image']['arch'])

            target_arches = set(self.get_arches())
            if target_arches != build_arches:
                # The latest brew build does not exactly match the required arches as specified in group.yml
                return self, True, f'Arches of {image_nvr}: ({build_arches}) does not match target arches {target_arches}'

            for archive in archives:
                # Example results of listing RPMs in an given imageID:
                # https://gist.github.com/jupierce/a8798858104dcf6dfa4bd1d6dd99d2d8
                archive_id = archive['id']
                rpm_entries = koji_api.listRPMs(imageID=archive_id)
                for rpm_entry in rpm_entries:
                    build_id = rpm_entry['build_id']
                    build = koji_api.getBuild(
                        build_id, brew.KojiWrapperOpts(caching=True))
                    package_name = build['package_name']
                    if package_name in changing_rpm_packages:
                        return self, True, f'Image includes {package_name} which is also about to change'
                    # Several RPMs may belong to the same package, and each archive must use the same
                    # build of a package, so all we need to collect is the set of build_ids for the packages
                    # across all of the archives.
                    builds_contained_in_archives[build_id] = build

        self.logger.info(
            f'Checking whether any of the installed builds {len(builds_contained_in_archives)} has been tagged by a relevant tag since this image\'s build brew event {image_build_event_id}'
        )

        installed_builds = list(builds_contained_in_archives.values())
        # Shuffle the builds before starting the threads. The reason is that multiple images are going to be performing
        # these queries simultaneously. Those images have similar packages (typically rooting in a rhel base image).
        # The KojiWrapper caching mechanism will allow two simultaneous calls to a Koji API to hit the actual
        # server since no result has yet been returned. Shuffling the installed package list spreads the threads
        # out among the packages to reduce re-work by the server.
        random.shuffle(installed_builds)
        changes_res = runtime.parallel_exec(
            f=lambda installed_package_build,
            terminate_event: is_image_older_than_package_build_tagging(
                self, image_build_event_id, installed_package_build,
                newest_image_event_ts, oldest_image_event_ts),
            args=installed_builds,
            n_threads=10)

        for changed, msg in changes_res.get():
            if changed:
                return self, True, msg

        return self, False, None
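
A hedged sketch of how a scan-sources style caller might fan this check out across a group (the parallel_exec usage mirrors the body above; function and variable names are illustrative):

def find_images_needing_rebuild(runtime, changing_rpm_packages, buildroot_tag):
    # Run the change assessment for every image metadata in the group in parallel.
    results = runtime.parallel_exec(
        f=lambda image_meta, terminate_event: image_meta.does_image_need_change(
            changing_rpm_packages=changing_rpm_packages,
            buildroot_tag=buildroot_tag),
        args=runtime.image_metas(),
        n_threads=10)
    return [(meta.distgit_key, msg) for meta, changed, msg in results.get() if changed]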
Example #20
def gen_assembly_from_releases(ctx, runtime, nightlies, standards, custom):
    runtime.initialize(mode='both',
                       clone_distgits=False,
                       clone_source=False,
                       prevent_cloning=True)
    logger = runtime.logger
    gen_assembly_name = ctx.obj['ASSEMBLY_NAME']  # The name of the assembly we are going to output

    # Create a map of package_name to RPMMetadata
    package_rpm_meta: Dict[str, RPMMetadata] = {
        rpm_meta.get_package_name(): rpm_meta
        for rpm_meta in runtime.rpm_metas()
    }

    def exit_with_error(msg):
        print(msg, file=sys.stderr)
        exit(1)

    if runtime.assembly != 'stream':
        exit_with_error(
            '--assembly must be "stream" in order to populate an assembly definition from nightlies'
        )

    if not nightlies and not standards:
        exit_with_error(
            'At least one release (--nightly or --standard) must be specified')

    if len(runtime.arches) != len(nightlies) + len(standards) and not custom:
        exit_with_error(
            f'Expected {len(runtime.arches)} releases (--nightly/--standard); one for each group arch: {runtime.arches}'
        )

    reference_releases_by_arch: Dict[str, str] = dict()  # Maps brew arch name to nightly name
    mosc_by_arch: Dict[str, str] = dict()  # Maps brew arch name to machine-os-content pullspec from nightly
    component_image_builds: Dict[str, BrewBuildImageInspector] = dict()  # Maps component package_name to brew build dict found for nightly
    component_rpm_builds: Dict[str, Dict[int, Dict]] = dict()  # Dict[ package_name ] -> Dict[ el? ] -> brew build dict
    basis_event_ts: float = 0.0

    release_pullspecs: Dict[str, str] = dict()
    for nightly_name in nightlies:
        major_minor, brew_cpu_arch, priv = util.isolate_nightly_name_components(
            nightly_name)
        if major_minor != runtime.get_minor_version():
            exit_with_error(
                f'Specified nightly {nightly_name} does not match group major.minor'
            )
        reference_releases_by_arch[brew_cpu_arch] = nightly_name
        rc_suffix = util.go_suffix_for_arch(brew_cpu_arch, priv)
        nightly_pullspec = f'registry.ci.openshift.org/ocp{rc_suffix}/release{rc_suffix}:{nightly_name}'
        if brew_cpu_arch in release_pullspecs:
            raise ValueError(
                f'Cannot process {nightly_name} since {release_pullspecs[brew_cpu_arch]} is already included'
            )
        release_pullspecs[brew_cpu_arch] = nightly_pullspec

    for standard_release_name in standards:
        version, brew_cpu_arch = standard_release_name.split('-')  # 4.7.22-s390x => ['4.7.22', 's390x']
        major_minor = '.'.join(version.split('.')[:2])  # isolate just x.y from version names like '4.77.22' and '4.8.0-rc.3'
        if major_minor != runtime.get_minor_version():
            exit_with_error(
                f'Specified release {standard_release_name} does not match group major.minor'
            )
        standard_pullspec = f'quay.io/openshift-release-dev/ocp-release:{standard_release_name}'
        if brew_cpu_arch in release_pullspecs:
            raise ValueError(
                f'Cannot process {standard_release_name} since {release_pullspecs[brew_cpu_arch]} is already included'
            )
        release_pullspecs[brew_cpu_arch] = standard_pullspec

    for brew_cpu_arch, pullspec in release_pullspecs.items():
        runtime.logger.info(f'Processing release: {pullspec}')

        release_json_str, _ = exectools.cmd_assert(
            f'oc adm release info {pullspec} -o=json', retries=3)
        release_info = Model(dict_to_model=json.loads(release_json_str))

        if not release_info.references.spec.tags:
            exit_with_error(
                f'Could not find any imagestream tags in release: {pullspec}')

        for component_tag in release_info.references.spec.tags:
            payload_tag_name = component_tag.name  # e.g. "aws-ebs-csi-driver"
            payload_tag_pullspec = component_tag['from'].name  # quay pullspec

            if payload_tag_name == 'machine-os-content':
                mosc_by_arch[brew_cpu_arch] = payload_tag_pullspec
                continue

            # The brew_build_inspector will take this archive image and find the actual
            # brew build which created it.
            brew_build_inspector = BrewBuildImageInspector(
                runtime, payload_tag_pullspec)
            package_name = brew_build_inspector.get_package_name()
            build_nvr = brew_build_inspector.get_nvr()
            if package_name in component_image_builds:
                # If we have already encountered this package once in the list of releases we are
                # processing, then make sure that the original NVR we found matches the new NVR.
                # We want the releases to be populated with identical builds.
                existing_nvr = component_image_builds[package_name].get_nvr()
                if build_nvr != existing_nvr:
                    exit_with_error(
                        f'Found disparate nvrs between releases; {existing_nvr} in processed and {build_nvr} in {pullspec}'
                    )
            else:
                # Otherwise, record the build as the first time we've seen an NVR for this
                # package.
                component_image_builds[package_name] = brew_build_inspector

            # We now try to determine a basis brew event that will
            # find this image during get_latest_build-like operations
            # for the assembly. At the time of this writing, metadata.get_latest_build
            # will only look for builds *completed* before the basis event. This could
            # be changed to *created* before the basis event in the future. However,
            # other logic that is used to find latest builds requires the build to be
            # tagged into an rhaos tag before the basis brew event.
            # To choose a safe / reliable basis brew event, we first find the
            # time at which a build was completed, then add 5 minutes.
            # That extra 5 minutes ensures brew will have had time to tag the
            # build appropriately for its build target. The 5 minutes is also
            # short enough to ensure that no other build of this image could have
            # completed before the basis event.

            completion_ts: float = brew_build_inspector.get_brew_build_dict()['completion_ts']
            # If the basis event for this image is > the basis_event capable of
            # sweeping images we've already analyzed, increase the basis_event_ts.
            basis_event_ts = max(basis_event_ts, completion_ts + (60.0 * 5))

    # basis_event_ts should now be greater than the build completion / target tagging operation
    # for any (non machine-os-content) image in the nightlies. Because images are built after RPMs,
    # it must also hold that the basis_event_ts is also greater than build completion & tagging
    # of any member RPM.

    # Let's now turn the approximate basis_event_ts into a brew event number
    with runtime.shared_koji_client_session() as koji_api:
        basis_event = koji_api.getLastEvent(before=basis_event_ts)['id']

    logger.info(f'Estimated basis brew event: {basis_event}')
    logger.info(
        f'The following image package_names were detected in the specified releases: {component_image_builds.keys()}'
    )

    # That said, things happen. Let's say image component X was built in build X1 and X2.
    # Image component Y was built in Y1. Let's say that the ordering was X1, X2, Y1 and, for
    # whatever reason, we find X1 and Y1 in the user specified nightly. This means the basis_event_ts
    # we find for Y1 is going to find X2 instead of X1 if we used it as part of an assembly's basis event.

    # To avoid that, we now evaluate whether any images or RPMs defy our assumption that the nightly
    # corresponds to the basis_event_ts we have calculated. If we find something that will not be swept
    # correctly by the estimated basis event, we collect up the outliers (hopefully few in number) into
    # a list of packages which must be included in the assembly as 'is:'. This might happen if, for example,
    # an artist accidentally builds an image on the command line for the stream assembly; without this logic,
    # that build might be found by our basis event. To guard against it, we explicitly pin to the image in the
    # nightly via the component's NVR as an override in the assembly definition.
    force_is: Set[str] = set()  # A set of package_names whose NVRs are not correctly sourced by the estimated basis_event
    for image_meta in runtime.image_metas():

        if image_meta.base_only or not image_meta.for_release:
            continue

        dgk = image_meta.distgit_key
        package_name = image_meta.get_component_name()
        basis_event_dict = image_meta.get_latest_build(
            default=None, complete_before_event=basis_event)
        if not basis_event_dict:
            exit_with_error(
                f'No image was found for assembly {runtime.assembly} for component {dgk} at estimated brew event {basis_event}. No normal reason for this to happen so exiting out of caution.'
            )

        basis_event_build_dict: BrewBuildImageInspector = BrewBuildImageInspector(
            runtime, basis_event_dict['id'])
        basis_event_build_nvr = basis_event_build_dict.get_nvr()

        if not image_meta.is_payload:
            # If this is not for the payload, the nightlies cannot have informed our NVR decision; just
            # pick whatever the estimated basis will pull and let the user know. If they want to change
            # it, they will need to pin it.
            logger.info(
                f'{dgk} non-payload build {basis_event_build_nvr} will be swept by estimated assembly basis event'
            )
            component_image_builds[package_name] = basis_event_build_dict
            continue

        # Otherwise, the image_meta is destined for the payload and analyzing the nightlies should
        # have given us an NVR which is expected to be selected by the assembly.

        if package_name not in component_image_builds:
            if custom:
                logger.warning(
                    f'Unable to find {dgk} in releases despite it being marked as is_payload in ART metadata; this may be because the image is not built for every arch or it is not labeled appropriately for the payload. Choosing what was in the estimated basis event sweep: {basis_event_build_nvr}'
                )
            else:
                logger.error(
                    f'Unable to find {dgk} in releases despite it being marked as is_payload in ART metadata; this may mean the image does not have the proper labeling for being in the payload. Choosing what was in the estimated basis event sweep: {basis_event_build_nvr}'
                )
            component_image_builds[package_name] = basis_event_build_dict
            continue

        ref_releases_component_build = component_image_builds[package_name]
        ref_nightlies_component_build_nvr = ref_releases_component_build.get_nvr()

        if basis_event_build_nvr != ref_nightlies_component_build_nvr:
            logger.info(
                f'{dgk} build {basis_event_build_nvr} was selected by estimated basis event. That is not what is in the specified releases, so this image will be pinned.'
            )
            force_is.add(package_name)
            continue

        # Otherwise, the estimated basis event resolved the image nvr we found in the nightlies. The
        # image NVR does not need to be pinned. Yeah!
        pass

    # We should have found a machine-os-content for each architecture in the group for a standard assembly
    for arch in runtime.arches:
        if arch not in mosc_by_arch:
            if custom:
                # This is permitted for custom assemblies which do not need to be assembled for every
                # architecture. The customer may just need x86_64.
                logger.info(
                    f'Did not find machine-os-content image for active group architecture: {arch}; ignoring since this is custom.'
                )
            else:
                exit_with_error(
                    f'Did not find machine-os-content image for active group architecture: {arch}'
                )

    # We now have a list of image builds that should be selected by the assembly basis event
    # and those that will need to be forced with 'is'. We now need to perform a similar step
    # for RPMs. Look at the image contents, see which RPMs are in use. If we build them,
    # then the NVRs in the image must be selected by the estimated basis event. If they are
    # not, then we must pin the NVRs in the assembly definition.

    with runtime.shared_koji_client_session() as koji_api:

        archive_lists = brew.list_archives_by_builds(
            [b.get_brew_build_id() for b in component_image_builds.values()],
            "image", koji_api)
        rpm_build_ids = {
            rpm["build_id"]
            for archives in archive_lists for ar in archives
            for rpm in ar["rpms"]
        }
        logger.info("Querying Brew build information for %s RPM builds...",
                    len(rpm_build_ids))
        # We now have a list of all RPM builds which have been installed into the various images which
        # ART builds. Specifically the ART builds which went into composing the nightlies.
        ref_releases_rpm_builds: List[Dict] = brew.get_build_objects(
            rpm_build_ids, koji_api)

        for ref_releases_rpm_build in ref_releases_rpm_builds:
            package_name = ref_releases_rpm_build['package_name']
            if package_name in package_rpm_meta:  # Does ART build this package?
                rpm_meta = package_rpm_meta[package_name]
                dgk = rpm_meta.distgit_key
                rpm_build_nvr = ref_releases_rpm_build['nvr']
                # If so, what RHEL version is this build for?
                el_ver = util.isolate_el_version_in_release(
                    ref_releases_rpm_build['release'])
                if not el_ver:
                    exit_with_error(
                        f'Unable to isolate el? version in {rpm_build_nvr}')

                if package_name not in component_rpm_builds:
                    # If this is the first time we've seen this ART package, bootstrap a dict for its
                    # potentially different builds for different RHEL versions.
                    component_rpm_builds[package_name]: Dict[int, Dict] = dict()

                if el_ver in component_rpm_builds[package_name]:
                    # We've already captured the build in our results
                    continue

                # Now it is time to see whether a query for the RPM from the basis event
                # estimate comes up with this RPM NVR.
                basis_event_build_dict = rpm_meta.get_latest_build(
                    el_target=el_ver, complete_before_event=basis_event)
                if not basis_event_build_dict:
                    exit_with_error(
                        f'No RPM was found for assembly {runtime.assembly} for component {dgk} at estimated brew event {basis_event}. No normal reason for this to happen so exiting out of caution.'
                    )


                component_rpm_builds[package_name][
                    el_ver] = ref_releases_rpm_build
                basis_event_build_nvr = basis_event_build_dict['nvr']
                logger.info(
                    f'{dgk} build {basis_event_build_nvr} selected by scan against estimated basis event'
                )
                if basis_event_build_nvr != ref_releases_rpm_build['nvr']:
                    # The basis event estimate did not find the RPM from the nightlies. We have to pin the package.
                    logger.info(
                        f'{dgk} build {basis_event_build_nvr} was selected by estimated basis event. That is not what is in the specified releases, so this RPM will be pinned.'
                    )
                    force_is.add(package_name)

    # component_image_builds now contains a mapping of package_name -> BrewBuildImageInspector for all images that should be included
    # in the assembly.
    # component_rpm_builds now contains a mapping of package_name to different RHEL versions that should be included
    # in the assembly.
    # force_is is a set of package_names which were not successfully selected by the estimated basis event.

    image_member_overrides: List[Dict] = []
    rpm_member_overrides: List[Dict] = []
    for package_name in force_is:
        if package_name in component_image_builds:
            build_inspector: BrewBuildImageInspector = component_image_builds[package_name]
            dgk = build_inspector.get_image_meta().distgit_key
            image_member_overrides.append({
                'distgit_key': dgk,
                'why':
                'Query from assembly basis event failed to replicate referenced nightly content exactly. Pinning to replicate.',
                'metadata': {
                    'is': {
                        'nvr': build_inspector.get_nvr()
                    }
                }
            })
        elif package_name in component_rpm_builds:
            dgk = package_rpm_meta[package_name].distgit_key
            rpm_member_overrides.append({
                'distgit_key': dgk,
                'why':
                'Query from assembly basis event failed to replicate referenced nightly content exactly. Pinning to replicate.',
                'metadata': {
                    'is': {
                        f'el{el_ver}':
                        component_rpm_builds[package_name][el_ver]['nvr']
                        for el_ver in component_rpm_builds[package_name]
                    }
                }
            })

    group_info = {}
    if not custom:
        group_info['advisories'] = {
            'image': -1,
            'rpm': -1,
            'extras': -1,
            'metadata': -1,
        }
    else:
        # Custom payloads don't require advisories.
        # If the user has specified fewer nightlies than the group requires, then we
        # need to override the group arches.
        group_info = {'arches!': list(mosc_by_arch.keys())}

    assembly_def = {
        'releases': {
            gen_assembly_name: {
                "assembly": {
                    'type': 'custom' if custom else 'standard',
                    'basis': {
                        'brew_event': basis_event,
                        'reference_releases': reference_releases_by_arch,
                    },
                    'group': group_info,
                    'rhcos': {
                        'machine-os-content': {
                            "images": mosc_by_arch,
                        }
                    },
                    'members': {
                        'rpms': rpm_member_overrides,
                        'images': image_member_overrides,
                    }
                }
            }
        }
    }

    print(yaml.dump(assembly_def))
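
# For reference, a minimal self-contained sketch (hypothetical assembly name, brew event
# and nightly) of the structure printed above; note that yaml.dump sorts keys alphabetically.
import yaml

sketch_assembly_def = {
    'releases': {
        '4.9.5': {
            'assembly': {
                'type': 'standard',
                'basis': {
                    'brew_event': 12345678,
                    'reference_releases': {'x86_64': '4.9.0-0.nightly-2021-07-04-180116'},
                },
                'group': {'advisories': {'image': -1, 'rpm': -1, 'extras': -1, 'metadata': -1}},
                'members': {'rpms': [], 'images': []},
            }
        }
    }
}
print(yaml.dump(sketch_assembly_def))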
Exemplo n.º 21
0
    def test_assembly_config_struct(self):
        release_configs = {
            "releases": {
                "child": {
                    "assembly": {
                        "basis": {
                            "assembly": "parent",
                        }
                    }
                },
                "parent": {
                    "assembly": {
                        "type": "custom"
                    }
                },
            }
        }
        actual = _assembly_config_struct(Model(release_configs), "child",
                                         "type", "standard")
        self.assertEqual(actual, "custom")

        release_configs = {
            "releases": {
                "child": {
                    "assembly": {
                        "basis": {
                            "assembly": "parent",
                        },
                        "type": "candidate"
                    }
                },
                "parent": {
                    "assembly": {
                        "type": "custom"
                    }
                },
            }
        }
        actual = _assembly_config_struct(Model(release_configs), "child",
                                         "type", "standard")
        self.assertEqual(actual, "candidate")

        release_configs = {
            "releases": {
                "child": {
                    "assembly": {
                        "basis": {
                            "assembly": "parent",
                        },
                    }
                },
                "parent": {
                    "assembly": {}
                },
            }
        }
        actual = _assembly_config_struct(Model(release_configs), "child",
                                         "type", "standard")
        self.assertEqual(actual, "standard")

        release_configs = {
            "releases": {
                "child": {
                    "assembly": {
                        "basis": {
                            "assembly": "parent",
                        },
                    }
                },
                "parent": {
                    "assembly": {
                        "type": None
                    },
                },
            }
        }
        actual = _assembly_config_struct(Model(release_configs), "child",
                                         "type", "standard")
        self.assertEqual(actual, None)

        release_configs = {
            "releases": {
                "child": {
                    "assembly": {
                        "basis": {
                            "assembly": "parent",
                        },
                        "foo": {
                            "a": 1,
                            "b": 2
                        }
                    }
                },
                "parent": {
                    "assembly": {
                        "foo": {
                            "b": 3,
                            "c": 4,
                        }
                    }
                },
            }
        }
        actual = _assembly_config_struct(Model(release_configs), "child",
                                         "foo", {})
        self.assertEqual(actual, {
            "a": 1,
            "b": 2,
            "c": 4,
        })
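
    # A plausible stand-in for _assembly_config_struct, consistent with every assertion
    # above (hypothetical: it operates on plain dicts rather than Model, for brevity).
    # Scalars in a child assembly shadow the parent; dicts merge with child keys winning.
    def _assembly_config_struct_sketch(self, release_configs, assembly, key, default):
        target = release_configs['releases'][assembly]['assembly']
        parent = target.get('basis', {}).get('assembly')
        inherited = (self._assembly_config_struct_sketch(release_configs, parent, key, default)
                     if parent else default)
        if key not in target:
            return inherited
        value = target[key]
        if isinstance(value, dict) and isinstance(inherited, dict):
            return {**inherited, **value}
        return value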
Exemplo n.º 22
0
    def _check_nightly_consistency(assembly_inspector: AssemblyInspector,
                                   nightly: str,
                                   arch: str) -> List[AssemblyIssue]:
        runtime = assembly_inspector.runtime

        def terminal_issue(msg: str) -> List[AssemblyIssue]:
            return [AssemblyIssue(msg, component='reference-releases')]

        runtime.logger.info(f'Processing nightly: {nightly}')
        major_minor, brew_cpu_arch, priv = isolate_nightly_name_components(
            nightly)

        if major_minor != runtime.get_minor_version():
            return terminal_issue(
                f'Specified nightly {nightly} does not match group major.minor'
            )

        rc_suffix = go_suffix_for_arch(brew_cpu_arch, priv)

        retries: int = 3
        release_json_str = ''
        rc = -1
        pullspec = f'registry.ci.openshift.org/ocp{rc_suffix}/release{rc_suffix}:{nightly}'
        while retries > 0:
            rc, release_json_str, err = exectools.cmd_gather(
                f'oc adm release info {pullspec} -o=json')
            if rc == 0:
                break
            runtime.logger.warning(
                f'Error accessing nightly release info for {pullspec}: {err}')
            retries -= 1

        if rc != 0:
            return terminal_issue(
                f'Unable to gather nightly release info details: {pullspec}; garbage collected?'
            )

        release_info = Model(dict_to_model=json.loads(release_json_str))
        if not release_info.references.spec.tags:
            return terminal_issue(f'Could not find tags in nightly {nightly}')

        issues: List[AssemblyIssue] = list()
        payload_entries: Dict[
            str, PayloadGenerator.
            PayloadEntry] = PayloadGenerator.find_payload_entries(
                assembly_inspector, arch, '')
        for component_tag in release_info.references.spec.tags:  # For each tag in the imagestream
            payload_tag_name: str = component_tag.name  # e.g. "aws-ebs-csi-driver"
            payload_tag_pullspec: str = component_tag[
                'from'].name  # quay pullspec
            if '@' not in payload_tag_pullspec:
                # This speaks to an invalid nightly, so raise an exception
                raise IOError(
                    f'Expected pullspec in {nightly}:{payload_tag_name} to be a sha digest but found: {payload_tag_pullspec}'
                )

            pullspec_sha = payload_tag_pullspec.rsplit('@', 1)[-1]
            entry = payload_entries.get(payload_tag_name, None)

            if not entry:
                raise IOError(
                    f'Did not find {nightly} payload tag {payload_tag_name} in computed assembly payload'
                )

            if entry.archive_inspector:
                if entry.archive_inspector.get_archive_digest(
                ) != pullspec_sha:
                    # Impermissible because the artist should remove the reference nightlies from the assembly definition
                    issues.append(
                        AssemblyIssue(
                            f'{nightly} contains {payload_tag_name} sha {pullspec_sha} but assembly computed archive: {entry.archive_inspector.get_archive_id()} and {entry.archive_inspector.get_archive_pullspec()}',
                            component='reference-releases'))
            elif entry.rhcos_build:
                if entry.rhcos_build.get_machine_os_content_digest(
                ) != pullspec_sha:
                    # Impermissible because the artist should remove the reference nightlies from the assembly definition
                    issues.append(
                        AssemblyIssue(
                            f'{nightly} contains {payload_tag_name} sha {pullspec_sha} but assembly computed rhcos: {entry.rhcos_build} and {entry.rhcos_build.get_machine_os_content_digest()}',
                            component='reference-releases'))
            else:
                raise IOError(f'Unsupported payload entry {entry}')

        return issues
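
    # Hypothetical usage (assuming this helper is reachable as a static method and an
    # AssemblyInspector instance is at hand):
    #   issues = _check_nightly_consistency(assembly_inspector,
    #                                       '4.9.0-0.nightly-2021-07-04-180116', 'x86_64')
    #   for issue in issues:  # each is an AssemblyIssue with component='reference-releases'
    #       print(issue)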
Exemplo n.º 23
0
    def __init__(self, logger):
        self.logger = logger
        self.group_config = Model({})
Exemplo n.º 24
0
    def does_image_need_change(self,
                               eldest_image_event_ts,
                               changing_rpm_packages=None,
                               buildroot_tag_ids=None):
        """
        TODO: this method is threadsafe now. Call it with threads!
        Answers the question of whether the latest built image needs to be rebuilt based on
        the packages (and therefore RPMs) it is dependent on might have changed in tags
        relevant to the image. A check is also made if the image depends on a package
        we know is changing because we are about to rebuild it.
        :param eldest_image_event_ts: The brew timestamp for the creation of the oldest image in this group.
        :param changing_rpm_packages: A list of package names that are about to change.
        :param buildroot_tag_ids: A list of build tag id's that contribute to this image's build root.
        :return: (<bool>, message). If True, the image might need to be rebuilt -- the message will say
                why. If False, message will be None.
        """

        changing_rpm_packages = changing_rpm_packages or []  # tolerate None; avoids a mutable default
        dgk = self.distgit_key
        runtime = self.runtime

        # Each key is an archive image id; each value is that archive's RPM list.
        rpm_entries_sets = {}
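        # For illustration only, a hypothetical shape of the result:
        #   {12345: [{'nvr': 'openssl-libs-1.1.1k-5.el8', ...}, ...]}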
        with runtime.pooled_koji_client_session() as koji_api:

            image_build = self.get_latest_build(default='')
            if not image_build:
                # It seems this has never been built. Mark it as needing change.
                return True, 'Image has never been built before'

            self.logger.debug(f'Image {dgk} latest is {image_build}')

            image_nvr = image_build['nvr']
            image_build_event_id = image_build[
                'creation_event_id']  # the brew event that created this build

            self.logger.info(
                f'Running a change assessment on {image_nvr} built at event {image_build_event_id}'
            )

            # Very rarely, an image might need to pull a package that is not actually installed in the
            # builder image or in the final image.
            # e.g. https://github.com/openshift/ironic-ipa-downloader/blob/999c80f17472d5dbbd4775d901e1be026b239652/Dockerfile.ocp#L11-L14
            # This is programmatically undetectable through koji queries. So we allow extra scan-sources hints to
            # be placed in the image metadata.
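            # For example, hypothetical image metadata carrying such a hint:
            #   scan_sources:
            #     extra_packages:
            #       - name: openshift-ironic-python-agent
            #         tag: rhaos-4.9-rhel-8-candidate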
            if self.config.scan_sources.extra_packages is not Missing:
                for package_details in self.config.scan_sources.extra_packages:
                    extra_package_name = package_details.name
                    extra_package_brew_tag = package_details.tag
                    # Example output: https://gist.github.com/jupierce/3bbc8be7265348a8f549d401664c9972
                    extra_latest_tagging_infos = koji_api.tagHistory(
                        package=extra_package_name,
                        tag=extra_package_brew_tag,
                        queryOpts={'limit': 1})
                    if not extra_latest_tagging_infos:
                        self.logger.warning(
                            f'Unable to find tagging event for extra_packages {extra_package_name}'
                        )
                        continue
                    # Otherwise, we have information about the most recent time this package was tagged into the
                    # relevant tag. Why the tagging event and not the build time? Well, the build could have been
                    # made long ago, but only tagged into the relevant tag recently.
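                    # e.g. a build created months ago (old build event) can be tagged into the
                    # relevant tag today (recent create_event); only the latter matters here.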
                    extra_latest_tagging_event = extra_latest_tagging_infos[0][
                        'create_event']
                    self.logger.debug(
                        f'Checking image creation time against extra_packages {extra_package_name} in tag {extra_package_brew_tag} @ tagging event {extra_latest_tagging_event}'
                    )
                    if extra_latest_tagging_event > image_build_event_id:
                        return True, f'Image {dgk} is sensitive to extra_packages {extra_package_name} which changed at event {extra_latest_tagging_event}'

            # Collect build times from any builder images used to create this image
            builders = self.config['from'].builder or []
            for builder in builders:
                if builder.member:
                    # We can't determine if images are about to change. Defer to scan-sources.
                    continue

                if builder.image:
                    builder_image_name = builder.image
                elif builder.stream:
                    builder_image_name = runtime.resolve_stream(
                        builder.stream).image
                else:
                    raise IOError(
                        f'Unable to determine builder image pullspec from {builder}'
                    )

                # builder_image_name example: "openshift/ose-base:ubi8"
                brew_image_url = self.runtime.resolve_brew_image_url(
                    builder_image_name)
                builder_brew_build = ImageMetadata.builder_image_builds.get(
                    brew_image_url, None)

                if not builder_brew_build:
                    out, err = exectools.cmd_assert(
                        f'oc image info {brew_image_url} --filter-by-os amd64 -o=json',
                        retries=5,
                        pollrate=10)
                    latest_builder_image_info = Model(json.loads(out))
                    builder_info_labels = latest_builder_image_info.config.config.Labels
                    builder_nvr_list = [
                        builder_info_labels['com.redhat.component'],
                        builder_info_labels['version'],
                        builder_info_labels['release']
                    ]

                    if not all(builder_nvr_list):
                        raise IOError(
                            f'Unable to find nvr in {builder_info_labels}')

                    builder_image_nvr = '-'.join(builder_nvr_list)
                    builder_brew_build = koji_api.getBuild(builder_image_nvr)
                    ImageMetadata.builder_image_builds[
                        brew_image_url] = builder_brew_build
                    self.logger.debug(
                        f'Found that builder image {brew_image_url} has build {builder_brew_build}'
                    )

                if image_build_event_id < builder_brew_build[
                        'creation_event_id']:
                    self.logger.info(
                        f'will be rebuilt because a builder image changed: {builder_image_name}'
                    )
                    return True, f'A builder image {builder_image_name} has changed since {image_nvr} was built'

            build_root_changes = brew.tags_changed_since_build(
                runtime, koji_api, image_build, buildroot_tag_ids)
            if build_root_changes:
                changing_tag_names = [
                    brc['tag_name'] for brc in build_root_changes
                ]
                self.logger.info(
                    f'Image will be rebuilt due to buildroot change since {image_nvr} (last build event={image_build_event_id}). Build root changes: [{changing_tag_names}]'
                )
                self.logger.debug(
                    f'Image will be rebuilt due to buildroot change since ({image_build}) (last build event={image_build_event_id}). Build root changes: {build_root_changes}'
                )
                return True, f'Buildroot tag changes in [{changing_tag_names}] since {image_nvr}'

            archives = koji_api.listArchives(image_build['id'])

            # Compare to the arches in runtime
            build_arches = {a['extra']['image']['arch'] for a in archives}
            target_arches = set(self.get_arches())
            if target_arches != build_arches:
                # The latest brew build does not exactly match the required arches as specified in group.yml
                return True, f'Arches of {image_nvr}: ({build_arches}) do not match target arches {target_arches}'

            for archive in archives:
                # Example results of listing RPMs in a given imageID:
                # https://gist.github.com/jupierce/a8798858104dcf6dfa4bd1d6dd99d2d8
                archive_id = archive['id']
                rpm_entries = koji_api.listRPMs(imageID=archive_id)
                rpm_entries_sets[archive_id] = rpm_entries

        # Leave the pooled koji context manager, so the following method can't deadlock waiting for an api.
        # The method we call inside also uses the pool.
        for archive_id, rpm_entries in rpm_entries_sets.items():
            self.logger.info(
                f'Checking whether one of my latest archives ({archive_id}) has any of its {len(rpm_entries)} rpms associated with a package that has been tagged into a relevant tag since this image\'s build brew event {image_build_event_id}'
            )
            rpm_nvrs = [entry['nvr'] for entry in rpm_entries]
            self.logger.debug(f'RPMS from {archive_id}: {rpm_nvrs}')
            changes_res = runtime.parallel_exec(
                f=lambda idx, terminate_event:
                is_image_older_than_package_build_tagging(
                    self, image_build_event_id, idx, rpm_entries,
                    changing_rpm_packages, eldest_image_event_ts),
                args=range(len(rpm_entries)),
                n_threads=20)

            for changed, msg in changes_res.get():
                if changed:
                    return True, msg

        return False, None
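
    # Hypothetical usage from a rebuild-detection pass (names assumed):
    #   changed, reason = image_meta.does_image_need_change(
    #       eldest_image_event_ts=eldest_ts,
    #       changing_rpm_packages=['openshift'],
    #       buildroot_tag_ids=buildroot_ids)
    #   if changed:
    #       runtime.logger.info(f'{image_meta.distgit_key} needs rebuild: {reason}')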
Exemplo n.º 25
0
    def test_needs_rebuild_with_upstream(self, mock_cmd_assert):
        runtime = self.runtime
        meta = self.meta
        koji_mock = self.koji_mock
        now = datetime.datetime.now(datetime.timezone.utc)
        then = now - datetime.timedelta(hours=5)

        def list_builds(packageID=None, state=None, pattern=None, queryOpts=None, **kwargs):
            return self._list_builds(builds, packageID=packageID, state=state, pattern=pattern, queryOpts=queryOpts)

        runtime.downstream_commitish_overrides = {}
        koji_mock.listBuilds.side_effect = list_builds
        ls_remote_commit = '296ac244f3e7fd2d937316639892f90f158718b0'

        meta.config.content = Model(dict_to_model={
            'source': {
                'git': {
                    'url': 'git@github.com:openshift/release.git',
                }
            }
        })

        # If listBuilds returns nothing, we want to trigger a rebuild
        builds = []
        self.assertEqual(meta.needs_rebuild().code, RebuildHintCode.NO_LATEST_BUILD)

        # Make sure irrelevant builds are ignored
        builds = [
            self.build_record(now, assembly='not_ours'),
            self.build_record(now, assembly=runtime.assembly, build_state=BuildStates.FAILED),
            self.build_record(now, assembly=runtime.assembly, git_commit=ls_remote_commit, build_state=BuildStates.FAILED),
            self.build_record(now, assembly=f'{runtime.assembly}extra', git_commit=ls_remote_commit)  # Close but not quite our assembly
        ]
        self.assertEqual(meta.needs_rebuild().code, RebuildHintCode.NO_LATEST_BUILD)

        meta.cgit_atom_feed = Mock()
        meta.cgit_atom_feed.return_value = [
            CgitAtomFeedEntry(title='', content='', updated=then, id='1234567')
        ]

        # In this scenario, we have a build newer than distgit's commit, but its git.<> release
        # component does not match the current upstream ls-remote commit. This means a
        # new build is required.
        builds = [
            self.build_record(now, assembly=runtime.assembly, git_commit='abcdefg')
        ]
        self.assertEqual(meta.needs_rebuild().code, RebuildHintCode.NEW_UPSTREAM_COMMIT)

        # In this scenario, we have a build newer than distgit's commit. Its git.<> release
        # component matches the ls-remote value. No new build required.
        builds = [
            self.build_record(now, assembly='not_ours'),
            self.build_record(now, assembly='not_ours', git_commit=ls_remote_commit),
            self.build_record(now, assembly=runtime.assembly, build_state=BuildStates.FAILED),
            self.build_record(now, assembly=runtime.assembly, git_commit=ls_remote_commit, build_state=BuildStates.FAILED),
            self.build_record(now, assembly=runtime.assembly, git_commit=ls_remote_commit)  # This one should match perfectly
        ]
        self.assertEqual(meta.needs_rebuild().code, RebuildHintCode.BUILD_IS_UP_TO_DATE)

        # We should also accept the 'stream' assembly
        builds = [
            self.build_record(now, assembly='not_ours'),
            self.build_record(now, assembly=runtime.assembly, build_state=BuildStates.FAILED),
            self.build_record(now, assembly=runtime.assembly, git_commit=ls_remote_commit, build_state=BuildStates.FAILED),
            self.build_record(now, assembly='stream', git_commit=ls_remote_commit)  # This one should match perfectly
        ]
        self.assertEqual(meta.needs_rebuild().code, RebuildHintCode.BUILD_IS_UP_TO_DATE)

        # If we tried the upstream commit recently and failed, there should be a delay before the next attempt
        builds = [
            self.build_record(now, assembly='not_ours'),
            self.build_record(now - datetime.timedelta(days=5), git_commit='1234567', assembly=runtime.assembly, build_state=BuildStates.COMPLETE),
            self.build_record(now, assembly=runtime.assembly, git_commit=ls_remote_commit, build_state=BuildStates.FAILED),
        ]
        self.assertEqual(meta.needs_rebuild().code, RebuildHintCode.DELAYING_NEXT_ATTEMPT)

        # If the failed build attempt is old, try again
        builds = [
            self.build_record(now, assembly='not_ours'),
            self.build_record(now - datetime.timedelta(days=5), git_commit='1234567', assembly=runtime.assembly, build_state=BuildStates.COMPLETE),
            self.build_record(now - datetime.timedelta(days=5), assembly=runtime.assembly, git_commit=ls_remote_commit, build_state=BuildStates.FAILED),
        ]
        self.assertEqual(meta.needs_rebuild().code, RebuildHintCode.LAST_BUILD_FAILED)

        # Scenario where the latest build has a commit that does not agree with current upstream commit
        # but there is an old build that does. Indicates some type of revert. Rebuild.
        builds = [
            self.build_record(now, assembly='not_ours'),
            self.build_record(now, git_commit='1234567', assembly=runtime.assembly, build_state=BuildStates.COMPLETE),
            self.build_record(now - datetime.timedelta(days=5), assembly=runtime.assembly, git_commit=ls_remote_commit),
        ]
        self.assertEqual(meta.needs_rebuild().code, RebuildHintCode.UPSTREAM_COMMIT_MISMATCH)

        # The preceding revert scenario only applies if the assemblies match
        builds = [
            self.build_record(now, assembly='not_ours'),
            self.build_record(now, git_commit='1234567', assembly='stream', build_state=BuildStates.COMPLETE),
            self.build_record(now - datetime.timedelta(days=5), assembly=runtime.assembly, git_commit=ls_remote_commit),
        ]
        self.assertEqual(meta.needs_rebuild().code, RebuildHintCode.BUILD_IS_UP_TO_DATE)

        # If both builds are 'stream' assembly, the upstream commit revert DOES affect our assembly
        builds = [
            self.build_record(now, assembly='not_ours'),
            self.build_record(now, git_commit='1234567', assembly='stream', build_state=BuildStates.COMPLETE),
            self.build_record(now - datetime.timedelta(days=5), assembly='stream', git_commit=ls_remote_commit),
        ]
        self.assertEqual(meta.needs_rebuild().code, RebuildHintCode.UPSTREAM_COMMIT_MISMATCH)

        # If there is any build of our assembly, it does not matter if there is one from stream; only use
        # one specific to our assembly. In this case, our last assembly specific build does not have the
        # right upstream commit.
        builds = [
            self.build_record(now, assembly='not_ours'),
            self.build_record(now - datetime.timedelta(days=5), git_commit=ls_remote_commit, assembly='stream', build_state=BuildStates.COMPLETE),
            self.build_record(now, assembly=runtime.assembly, git_commit='123457'),
        ]
        self.assertEqual(meta.needs_rebuild().code, RebuildHintCode.UPSTREAM_COMMIT_MISMATCH)