Example #1
File: utils.py  Project: pbortlov/iib
def get_binary_image_from_config(ocp_version,
                                 distribution_scope,
                                 binary_image_config={}):
    """
    Determine the binary image to be used to build the index image.

    :param str ocp_version: the ocp_version label value of the index image.
    :param str distribution_scope: the distribution_scope label value of the index image.
    :param dict binary_image_config: the dict of config required to identify the appropriate
        ``binary_image`` to use.
    :return: pull specification of the binary_image to be used for this build.
    :rtype: str
    :raises IIBError: when the config value for the ocp_version and distribution_scope is missing.
    """
    binary_image = binary_image_config.get(distribution_scope,
                                           {}).get(ocp_version, None)
    if not binary_image:
        raise IIBError(
            'IIB does not have a configured binary_image for'
            f' distribution_scope: {distribution_scope} and ocp_version: {ocp_version}.'
            ' Please specify a binary_image value in the request.')

    return binary_image
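
A minimal pytest-style usage sketch for the function above. The import paths are assumptions based on the file/project line; they are not shown in the excerpt.

import pytest

from iib.exceptions import IIBError
from iib.workers.tasks.utils import get_binary_image_from_config  # assumed module path


def test_get_binary_image_from_config():
    config = {'prod': {'v4.5': 'registry.example.com/binary-image:prod'}}
    assert (
        get_binary_image_from_config('v4.5', 'prod', config)
        == 'registry.example.com/binary-image:prod'
    )


def test_get_binary_image_from_config_missing_config():
    # A missing distribution_scope/ocp_version combination raises IIBError
    with pytest.raises(IIBError, match='does not have a configured binary_image'):
        get_binary_image_from_config('v4.9', 'stage', {'prod': {}})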
Example #2
def test_handle_add_request_bundle_resolution_failure(mock_grb, mock_srs, mock_cleanup):
    error_msg = 'Bundle Resolution failure!'
    mock_grb.side_effect = IIBError(error_msg)
    bundles = ['some-bundle:2.3-1']
    cnr_token = 'token'
    organization = 'org'
    greenwave_config = {'some_key': 'other_value'}
    with pytest.raises(IIBError, match=error_msg):
        build.handle_add_request(
            bundles,
            'binary-image:latest',
            3,
            'from-index:latest',
            ['s390x'],
            cnr_token,
            organization,
            False,
            None,
            greenwave_config,
        )
    mock_cleanup.assert_called_once_with()
    mock_srs.assert_called_once()
    mock_grb.assert_called_once_with(bundles)
Example #3
def test_copy_files_from_image(mock_podman_pull, mock_run_cmd, fail_rm):
    image = 'bundle-image:latest'
    src_path = '/manifests'
    dest_path = '/destination/path/manifests'

    container_id = 'df2ff736efeaff598330a128b3dc4875caf254d9f416cefd86ec009b74d1488b'

    side_effect = [f'{container_id}\n', '']
    if fail_rm:
        side_effect.append(IIBError('Uh oh! Something went wrong.'))
    else:
        side_effect.append('')
    mock_run_cmd.side_effect = side_effect

    build._copy_files_from_image(image, src_path, dest_path)
    mock_podman_pull.assert_called_once()

    mock_run_cmd.assert_has_calls([
        mock.call(['podman', 'create', image, 'unused'], exc_msg=mock.ANY),
        mock.call(['podman', 'cp', f'{container_id}:{src_path}', dest_path],
                  exc_msg=mock.ANY),
        mock.call(['podman', 'rm', container_id], exc_msg=mock.ANY),
    ])
Example #4
def _validate_distribution_scope(resolved_distribution_scope, distribution_scope):
    """
    Validate distribution scope is allowed to be updated.

    :param str resolved_distribution_scope: the distribution_scope that the index is for.
    :param str distribution_scope: the distribution scope that has been requested for
        the index image.
    :return: the valid distribution scope
    :rtype: str
    :raises IIBError: if the ``resolved_distribution_scope`` is of lesser scope than
        ``distribution_scope``
    """
    if not distribution_scope:
        return resolved_distribution_scope

    scopes = ["dev", "stage", "prod"]
    # Make sure the request isn't regressing the distribution scope
    if scopes.index(distribution_scope) > scopes.index(resolved_distribution_scope):
        raise IIBError(
            f'Cannot set "distribution_scope" to {distribution_scope} because from index is'
            f' already set to {resolved_distribution_scope}'
        )
    return distribution_scope
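
A short usage sketch of the scope rules enforced above; it assumes the function and IIBError are importable (the excerpt does not name the module).

import pytest

from iib.exceptions import IIBError

assert _validate_distribution_scope('prod', None) == 'prod'      # falls back to the resolved scope
assert _validate_distribution_scope('prod', 'stage') == 'stage'  # narrowing the scope is allowed

with pytest.raises(IIBError, match='Cannot set "distribution_scope"'):
    _validate_distribution_scope('stage', 'prod')  # widening the scope is rejected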
Example #5
def get_image_arches(pull_spec):
    """
    Get the architectures this image was built for.

    :param str pull_spec: the pull specification to a v2 manifest list
    :return: a set of architectures of the container images contained in the manifest list
    :rtype: set
    :raises IIBError: if the pull specification is not a v2 manifest list
    """
    log.debug('Get the available arches for %s', pull_spec)
    skopeo_raw = skopeo_inspect(f'docker://{pull_spec}', '--raw')
    arches = set()
    if skopeo_raw.get('mediaType') == 'application/vnd.docker.distribution.manifest.list.v2+json':
        for manifest in skopeo_raw['manifests']:
            arches.add(manifest['platform']['architecture'])
    elif skopeo_raw.get('mediaType') == 'application/vnd.docker.distribution.manifest.v2+json':
        skopeo_out = skopeo_inspect(f'docker://{pull_spec}', '--config')
        arches.add(skopeo_out['architecture'])
    else:
        raise IIBError(
            f'The pull specification of {pull_spec} is neither a v2 manifest list nor a v2 manifest'
        )

    return arches
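
An illustrative sketch that patches skopeo_inspect so the manifest-list branch above can be exercised without contacting a registry. The module path is an assumption; adjust it to wherever get_image_arches and skopeo_inspect actually live.

from unittest import mock

from iib.workers.tasks import utils  # assumed module path

manifest_list = {
    'mediaType': 'application/vnd.docker.distribution.manifest.list.v2+json',
    'manifests': [
        {'platform': {'architecture': 'amd64'}},
        {'platform': {'architecture': 's390x'}},
    ],
}

with mock.patch.object(utils, 'skopeo_inspect', return_value=manifest_list):
    assert utils.get_image_arches('registry.example.com/ns/image:tag') == {'amd64', 's390x'}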
Example #6
File: build.py  Project: fromanirh/iib
def _get_resolved_bundles(bundles):
    """
    Get the pull specification of the bundle images using their digests.

    Determine if the pull spec refers to a manifest list.
    If so, simply use the digest of the first item in the manifest list.
    If not a manifest list, it must be a v2s2 image manifest and should be used as it is.

    :param list bundles: the list of bundle images to be resolved.
    :return: the list of bundle images resolved to their digests.
    :rtype: list
    :raises IIBError: if unable to resolve a bundle image.
    """
    log.info('Resolving bundles %s', ', '.join(bundles))
    resolved_bundles = set()
    for bundle_pull_spec in bundles:
        skopeo_raw = skopeo_inspect(f'docker://{bundle_pull_spec}', '--raw')
        if (skopeo_raw.get('mediaType') ==
                'application/vnd.docker.distribution.manifest.list.v2+json'):
            # Get the digest of the first item in the manifest list
            digest = skopeo_raw['manifests'][0]['digest']
            name = _get_container_image_name(bundle_pull_spec)
            resolved_bundles.add(f'{name}@{digest}')
        elif (skopeo_raw.get('mediaType')
              == 'application/vnd.docker.distribution.manifest.v2+json'
              and skopeo_raw.get('schemaVersion') == 2):
            resolved_bundles.add(_get_resolved_image(bundle_pull_spec))
        else:
            error_msg = (
                f'The pull specification of {bundle_pull_spec} is neither '
                f'a v2 manifest list nor a v2s2 manifest. Type {skopeo_raw.get("mediaType")}'
                f' and schema version {skopeo_raw.get("schemaVersion")} is not supported by IIB.'
            )
            raise IIBError(error_msg)

    return list(resolved_bundles)
Example #7
def _push_package_manifest(package_dir, cnr_token, organization):
    """
    Push ``manifests.zip`` file created for an exported package to OMPS.

    :param str package_dir: path to the exported package directory.
    :param str cnr_token: the token required to push backported packages to the legacy
        app registry via OMPS.
    :param str organization: the organization name in the legacy app registry to which
        the backported packages should be pushed.
    :return: Response from OMPS in JSON format
    :rtype: dict|None
    :raises IIBError: if the push fails
    """
    conf = get_worker_config()
    base_dir, _ = _get_base_dir_and_pkg_name(package_dir)
    with open(f'{base_dir}/manifests.zip', 'rb') as fobj:
        files = {'file': (fobj.name, fobj)}
        log.info('Files are %s', files)
        resp = requests.post(
            f'{conf["iib_omps_url"]}{organization}/zipfile',
            headers={'Authorization': cnr_token},
            files=files,
        )
        if not resp.ok:
            log.error('Request to OMPS failed: %s', resp.text)
            try:
                error_msg = resp.json().get('message',
                                            'An unknown error occurred')
            except json.JSONDecodeError:
                error_msg = resp.text
            raise IIBError(
                f'Push to {organization} in the legacy app registry was unsuccessful: {error_msg}'
            )

        log.info('OMPS response: %s', resp.text)
        return resp.json()
Example #8
File: build.py  Project: fromanirh/iib
def _adjust_operator_bundle(manifests_path, metadata_path, organization=None):
    """
    Apply modifications to the operator manifests at the given location.

    For any container image pull spec found in the Operator CSV files, replace floating
    tags with pinned digests, e.g. `image:latest` becomes `image@sha256:...`.

    If spec.relatedImages is not set, it will be set with the pinned digests. If it is set but
    there are also RELATED_IMAGE_* environment variables set, an exception will be raised.

    This method relies on the OperatorManifest class to properly identify and apply the
    modifications as needed.

    :param str manifests_path: the full path to the directory containing the operator manifests.
    :param str metadata_path: the full path to the directory containing the bundle metadata files.
    :param str organization: the organization this bundle is for. If no organization is provided,
        no custom behavior will be applied.
    :raises IIBError: if the operator manifest has invalid entries
    :return: a dictionary of labels to set on the bundle
    :rtype: dict
    """
    package_name, labels = _apply_package_name_suffix(metadata_path,
                                                      organization)

    operator_manifest = OperatorManifest.from_directory(manifests_path)
    found_pullspecs = set()
    operator_csvs = []
    for operator_csv in operator_manifest.files:
        if operator_csv.has_related_images():
            csv_file_name = os.path.basename(operator_csv.path)
            if operator_csv.has_related_image_envs():
                raise IIBError(
                    f'The ClusterServiceVersion file {csv_file_name} has entries in '
                    'spec.relatedImages and one or more containers have RELATED_IMAGE_* '
                    'environment variables set. This is not allowed for bundles regenerated with '
                    'IIB.')
            log.debug(
                'Skipping pinning since the ClusterServiceVersion file %s has entries in '
                'spec.relatedImages',
                csv_file_name,
            )
            continue

        operator_csvs.append(operator_csv)

        for pullspec in operator_csv.get_pullspecs():
            found_pullspecs.add(pullspec)

    conf = get_worker_config()
    registry_replacements = (conf['iib_organization_customizations'].get(
        organization, {}).get('registry_replacements', {}))

    # Resolve pull specs to container image digests
    replacement_pullspecs = {}
    for pullspec in found_pullspecs:
        replacement_needed = False
        if ':' not in ImageName.parse(pullspec).tag:
            replacement_needed = True

        # Always resolve the image to make sure it's valid
        resolved_image = ImageName.parse(_get_resolved_image(
            pullspec.to_str()))

        if registry_replacements.get(resolved_image.registry):
            replacement_needed = True
            resolved_image.registry = registry_replacements[
                resolved_image.registry]

        if replacement_needed:
            log.debug(
                '%s will be replaced with %s',
                pullspec,
                resolved_image.to_str(),
            )
            replacement_pullspecs[pullspec] = resolved_image

    # Apply modifications to the operator bundle image metadata
    for operator_csv in operator_csvs:
        csv_file_name = os.path.basename(operator_csv.path)
        log.info('Replacing the pull specifications on %s', csv_file_name)
        operator_csv.replace_pullspecs_everywhere(replacement_pullspecs)

        log.info('Setting spec.relatedImages on %s', csv_file_name)
        operator_csv.set_related_images()

        operator_csv.dump()

    if organization:
        _adjust_csv_annotations(operator_manifest.files, package_name,
                                organization)

    return labels
Example #9
File: build.py  Project: fromanirh/iib
def _apply_package_name_suffix(metadata_path, organization=None):
    """
    Add the package name suffix if configured for this organization.

    This adds the suffix to the value of
    ``annotations['operators.operatorframework.io.bundle.package.v1']`` in
    ``metadata/annotations.yaml``.

    The final package name value is returned as part of the tuple.

    :param str metadata_path: the path to the bundle's metadata directory.
    :param str organization: the organization this customization is for.
    :raise IIBError: if the ``metadata/annotations.yaml`` file is in an unexpected format.
    :return: a tuple with the package name and a dictionary of labels to set on the bundle.
    :rtype: tuple(str, dict)
    """
    annotations_yaml_path = os.path.join(metadata_path, 'annotations.yaml')
    if not os.path.exists(annotations_yaml_path):
        raise IIBError(
            'metadata/annotations.yaml does not exist in the bundle')

    with open(annotations_yaml_path, 'r') as f:
        try:
            annotations_yaml = yaml.load(f)
        except ruamel.yaml.YAMLError:
            error = 'metadata/annotations.yaml is not valid YAML'
            log.exception(error)
            raise IIBError(error)

    if not isinstance(annotations_yaml.get('annotations', {}), dict):
        raise IIBError(
            'The value of metadata/annotations.yaml must be a dictionary')

    package_label = 'operators.operatorframework.io.bundle.package.v1'
    package_annotation = annotations_yaml.get('annotations',
                                              {}).get(package_label)
    if not package_annotation:
        raise IIBError(
            f'{package_label} is not set in metadata/annotations.yaml')

    if not isinstance(package_annotation, str):
        raise IIBError(
            f'The value of {package_label} in metadata/annotations.yaml is not a string'
        )

    if not organization:
        log.debug(
            'No organization was provided to add the package name suffix')
        return package_annotation, {}

    conf = get_worker_config()
    package_name_suffix = (conf['iib_organization_customizations'].get(
        organization, {}).get('package_name_suffix'))
    if not package_name_suffix:
        log.debug(
            'The "package_name_suffix" configuration is not set for the organization %s',
            organization,
        )
        return package_annotation, {}

    if package_annotation.endswith(package_name_suffix):
        log.debug(
            'No modifications are needed on %s in metadata/annotations.yaml',
            package_label)
        return package_annotation, {}

    annotations_yaml['annotations'][
        package_label] = f'{package_annotation}{package_name_suffix}'

    with open(annotations_yaml_path, 'w') as f:
        yaml.dump(annotations_yaml, f)

    log.info(
        'Modified %s in metadata/annotations.yaml from %s to %s',
        package_label,
        package_annotation,
        annotations_yaml['annotations'][package_label],
    )

    return (
        annotations_yaml['annotations'][package_label],
        {
            package_label: annotations_yaml['annotations'][package_label]
        },
    )
Example #10
def _add_bundles_missing_in_source(
    source_index_bundles,
    target_index_bundles,
    base_dir,
    binary_image,
    source_from_index,
    request_id,
    arch,
    ocp_version,
    overwrite_target_index_token=None,
    distribution_scope=None,
):
    """
    Rebuild index image with bundles missing from source image but present in target image.

    If no bundles are missing in the source index image, the index image is still rebuilt
    using the new binary image.

    :param list source_index_bundles: bundles present in the source index image.
    :param list target_index_bundles: bundles present in the target index image.
    :param str base_dir: base directory where operation files will be located.
    :param str binary_image: binary image to be used by the new index image.
    :param str source_from_index: index image, whose data will be contained in the new index image.
    :param int request_id: the ID of the IIB build request.
    :param str arch: the architecture to build this image for.
    :param str ocp_version: ocp version which will be added as a label to the image.
    :param str overwrite_target_index_token: the token used for overwriting the input
        ``source_from_index`` image. This is required for non-privileged users to use
        ``overwrite_target_index``. The token must be in the format "user:password".
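    :param str distribution_scope: the distribution scope to set as a label on the new index
        image; defaults to ``None``.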
    :return: bundles which were added to the index image.
    :rtype: list
    """
    set_request_state(request_id, 'in_progress',
                      'Adding bundles missing in source index image')
    log.info(
        'Adding bundles from target index image which are missing from source index image'
    )
    missing_bundles = []
    source_bundle_digests = []
    source_bundle_csv_names = []
    target_bundle_digests = []

    for bundle in source_index_bundles:
        if '@sha256:' in bundle['bundlePath']:
            source_bundle_digests.append(
                bundle['bundlePath'].split('@sha256:')[-1])
            source_bundle_csv_names.append(bundle['csvName'])
        else:
            raise IIBError(
                f'Bundle {bundle["bundlePath"]} in the source index image is not defined via digest'
            )
    for bundle in target_index_bundles:
        if '@sha256:' in bundle['bundlePath']:
            target_bundle_digests.append(
                (bundle['bundlePath'].split('@sha256:')[-1], bundle))
        else:
            raise IIBError(
                f'Bundle {bundle["bundlePath"]} in the target index image is not defined via digest'
            )

    for target_bundle_digest, bundle in target_bundle_digests:
        if (target_bundle_digest not in source_bundle_digests
                and bundle['csvName'] not in source_bundle_csv_names):
            missing_bundles.append(bundle)

    missing_bundle_paths = [bundle['bundlePath'] for bundle in missing_bundles]
    if missing_bundle_paths:
        log.info('%s bundles are missing in the source index image.',
                 len(missing_bundle_paths))
    else:
        log.info(
            'No bundles are missing in the source index image. However, the index image is '
            'still being rebuilt with the new binary image.')

    _opm_index_add(
        base_dir,
        missing_bundle_paths,
        binary_image,
        source_from_index,
        overwrite_target_index_token,
    )
    _add_label_to_index('com.redhat.index.delivery.version', ocp_version,
                        base_dir, 'index.Dockerfile')
    _add_label_to_index(
        'com.redhat.index.delivery.distribution_scope',
        distribution_scope,
        base_dir,
        'index.Dockerfile',
    )
    _build_image(base_dir, 'index.Dockerfile', request_id, arch)
    _push_image(request_id, arch)
    _create_and_push_manifest_list(request_id, [arch])
    log.info('New index image created')

    return missing_bundles
Example #11
def _prepare_request_for_build(binary_image,
                               request_id,
                               from_index=None,
                               add_arches=None,
                               bundles=None):
    """
    Prepare the request for the index image build.

    All information that was retrieved and/or calculated for the next steps in the build are
    returned as a dictionary.

    This function was created so that code didn't need to be duplicated for the ``add`` and ``rm``
    request types.

    :param str binary_image: the pull specification of the container image where the opm binary
        gets copied from.
    :param int request_id: the ID of the IIB build request
    :param str from_index: the pull specification of the container image containing the index that
        the index image build will be based from.
    :param list add_arches: the list of arches to build in addition to the arches ``from_index`` is
        currently built for; if ``from_index`` is ``None``, then this is used as the list of arches
        to build the index image for
    :param list bundles: the list of bundles to create the bundle mapping on the request
    :return: a dictionary with the keys: arches, binary_image_resolved, and from_index_resolved.
    :raises IIBError: if the container image resolution fails or the architectures couldn't be
        detected.
    """
    if bundles is None:
        bundles = []

    set_request_state(request_id, 'in_progress',
                      'Resolving the container images')

    if add_arches:
        arches = set(add_arches)
    else:
        arches = set()

    binary_image_resolved = _get_resolved_image(binary_image)
    binary_image_arches = _get_image_arches(binary_image_resolved)

    if from_index:
        from_index_resolved = _get_resolved_image(from_index)
        from_index_arches = _get_image_arches(from_index_resolved)
        arches = arches | from_index_arches
    else:
        from_index_resolved = None

    if not arches:
        raise IIBError('No arches were provided to build the index image')

    arches_str = ', '.join(sorted(arches))
    log.debug('Set to build the index image for the following arches: %s',
              arches_str)

    if not arches.issubset(binary_image_arches):
        raise IIBError(
            'The binary image is not available for the following arches: {}'.
            format(', '.join(sorted(arches - binary_image_arches))))

    bundle_mapping = {}
    for bundle in bundles:
        operator = get_image_label(
            bundle, 'operators.operatorframework.io.bundle.package.v1')
        if operator:
            bundle_mapping.setdefault(operator, []).append(bundle)

    payload = {
        'binary_image_resolved':
        binary_image_resolved,
        'state':
        'in_progress',
        'state_reason':
        f'Building the index image for the following arches: {arches_str}',
    }
    if bundle_mapping:
        payload['bundle_mapping'] = bundle_mapping
    if from_index_resolved:
        payload['from_index_resolved'] = from_index_resolved
    exc_msg = 'Failed setting the resolved images on the request'
    update_request(request_id, payload, exc_msg)

    return {
        'arches': arches,
        'binary_image_resolved': binary_image_resolved,
        'from_index_resolved': from_index_resolved,
    }
Example #12
def _add_bundles_missing_in_source(
    source_index_bundles,
    target_index_bundles,
    base_dir,
    binary_image,
    source_from_index,
    request_id,
    arch,
    ocp_version,
    overwrite_target_index_token=None,
    distribution_scope=None,
):
    """
    Rebuild index image with bundles missing from source image but present in target image.

    If no bundles are missing in the source index image, the index image is still rebuilt
    using the new binary image.

    :param list source_index_bundles: bundles present in the source index image.
    :param list target_index_bundles: bundles present in the target index image.
    :param str base_dir: base directory where operation files will be located.
    :param str binary_image: binary image to be used by the new index image.
    :param str source_from_index: index image, whose data will be contained in the new index image.
    :param int request_id: the ID of the IIB build request.
    :param str arch: the architecture to build this image for.
    :param str ocp_version: ocp version which will be added as a label to the image.
    :param str overwrite_target_index_token: the token used for overwriting the input
        ``source_from_index`` image. This is required to use ``overwrite_target_index``.
        The token must be in the format "user:password".
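    :param str distribution_scope: the distribution scope to set as a label on the new index
        image; defaults to ``None``.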
    :return: tuple where the first value is a list of bundles which were added to the index image
        and the second value is a list of bundles in the new index whose ocp_version range does not
        satisfy the ocp_version value of the target index.
    :rtype: tuple
    """
    set_request_state(request_id, 'in_progress',
                      'Adding bundles missing in source index image')
    log.info(
        'Adding bundles from target index image which are missing from source index image'
    )
    missing_bundles = []
    missing_bundle_paths = []
    # This list stores the bundles whose ocp_version range does not satisfy the ocp_version
    # of the target index
    invalid_bundles = []
    source_bundle_digests = []
    source_bundle_csv_names = []
    target_bundle_digests = []

    for bundle in source_index_bundles:
        if '@sha256:' in bundle['bundlePath']:
            source_bundle_digests.append(
                bundle['bundlePath'].split('@sha256:')[-1])
            source_bundle_csv_names.append(bundle['csvName'])
        else:
            raise IIBError(
                f'Bundle {bundle["bundlePath"]} in the source index image is not defined via digest'
            )
    for bundle in target_index_bundles:
        if '@sha256:' in bundle['bundlePath']:
            target_bundle_digests.append(
                (bundle['bundlePath'].split('@sha256:')[-1], bundle))
        else:
            raise IIBError(
                f'Bundle {bundle["bundlePath"]} in the target index image is not defined via digest'
            )

    for target_bundle_digest, bundle in target_bundle_digests:
        if (target_bundle_digest not in source_bundle_digests
                and bundle['csvName'] not in source_bundle_csv_names):
            missing_bundles.append(bundle)
            missing_bundle_paths.append(bundle['bundlePath'])

    for bundle in itertools.chain(missing_bundles, source_index_bundles):
        if not is_bundle_version_valid(bundle['bundlePath'], ocp_version):
            invalid_bundles.append(bundle)

    if invalid_bundles:
        log.info(
            '%s bundles have invalid version label and will be deprecated.',
            len(invalid_bundles))

    _opm_index_add(
        base_dir,
        missing_bundle_paths,
        binary_image,
        from_index=source_from_index,
        overwrite_from_index_token=overwrite_target_index_token,
        # Use podman until opm's default mechanism is more resilient:
        #   https://bugzilla.redhat.com/show_bug.cgi?id=1937097
        container_tool='podman',
    )
    _add_label_to_index('com.redhat.index.delivery.version', ocp_version,
                        base_dir, 'index.Dockerfile')
    _add_label_to_index(
        'com.redhat.index.delivery.distribution_scope',
        distribution_scope,
        base_dir,
        'index.Dockerfile',
    )
    _build_image(base_dir, 'index.Dockerfile', request_id, arch)
    _push_image(request_id, arch)
    _create_and_push_manifest_list(request_id, [arch])
    log.info('New index image created')

    return (missing_bundles, invalid_bundles)
Example #13
def _adjust_operator_bundle(
    manifests_path, metadata_path, request_id, organization=None, pinned_by_iib=False
):
    """
    Apply modifications to the operator manifests at the given location.

    For any container image pull spec found in the Operator CSV files, replace floating
    tags with pinned digests, e.g. `image:latest` becomes `image@sha256:...`.

    If spec.relatedImages is not set, it will be set with the pinned digests. If it is set but
    there are also RELATED_IMAGE_* environment variables set, the relatedImages will be regenerated
    and the digests will be pinned again.

    This method relies on the OperatorManifest class to properly identify and apply the
    modifications as needed.

    :param str manifests_path: the full path to the directory containing the operator manifests.
    :param str metadata_path: the full path to the directory containing the bundle metadata files.
    :param int request_id: the ID of the IIB build request.
    :param str organization: the organization this bundle is for. If no organization is provided,
        no custom behavior will be applied.
    :param bool pinned_by_iib: whether or not the bundle image has already been processed by
        IIB to perform image pinning of related images.
    :raises IIBError: if the operator manifest has invalid entries
    :return: a dictionary of labels to set on the bundle
    :rtype: dict
    """
    try:
        operator_manifest = OperatorManifest.from_directory(manifests_path)
    except (ruamel.yaml.YAMLError, ruamel.yaml.constructor.DuplicateKeyError) as e:
        error = f'The Operator Manifest is not in a valid YAML format: {e}'
        log.exception(error)
        raise IIBError(error)

    conf = get_worker_config()
    organization_customizations = conf['iib_organization_customizations'].get(organization, [])
    if not organization_customizations:
        organization_customizations = [
            {'type': 'resolve_image_pullspecs'},
            {'type': 'related_bundles'},
            {'type': 'package_name_suffix'},
            {'type': 'registry_replacements'},
            {'type': 'image_name_from_labels'},
            {'type': 'csv_annotations'},
            {'type': 'enclose_repo'},
        ]

    annotations_yaml = _get_package_annotations(metadata_path)
    package_name = annotations_yaml['annotations'][
        'operators.operatorframework.io.bundle.package.v1'
    ]
    labels = {}

    # Perform the customizations in order
    for customization in organization_customizations:
        customization_type = customization['type']
        if customization_type == 'package_name_suffix':
            package_name_suffix = customization.get('suffix')
            if package_name_suffix:
                log.info('Applying package_name_suffix : %s', package_name_suffix)
                package_name, package_labels = _apply_package_name_suffix(
                    metadata_path, package_name_suffix
                )
                labels = {**labels, **package_labels}
        elif customization_type == 'registry_replacements':
            registry_replacements = customization.get('replacements', {})
            if registry_replacements:
                log.info('Applying registry replacements')
                bundle_metadata = _get_bundle_metadata(operator_manifest, pinned_by_iib)
                _apply_registry_replacements(bundle_metadata, registry_replacements)
        elif customization_type == 'csv_annotations' and organization:
            org_csv_annotations = customization.get('annotations')
            if org_csv_annotations:
                log.info('Applying csv annotations for organization %s', organization)
                _adjust_csv_annotations(operator_manifest.files, package_name, org_csv_annotations)
        elif customization_type == 'image_name_from_labels':
            org_image_name_template = customization.get('template', '')
            if org_image_name_template:
                bundle_metadata = _get_bundle_metadata(operator_manifest, pinned_by_iib)
                _replace_image_name_from_labels(bundle_metadata, org_image_name_template)
        elif customization_type == 'enclose_repo':
            org_enclose_repo_namespace = customization.get('namespace')
            org_enclose_repo_glue = customization.get('enclosure_glue')
            if org_enclose_repo_namespace and org_enclose_repo_glue:
                log.info(
                    'Applying enclose_repo customization with namespace %s and enclosure_glue %s'
                    ' for organization %s',
                    org_enclose_repo_namespace,
                    org_enclose_repo_glue,
                    organization,
                )
                bundle_metadata = _get_bundle_metadata(operator_manifest, pinned_by_iib)
                _apply_repo_enclosure(
                    bundle_metadata, org_enclose_repo_namespace, org_enclose_repo_glue
                )
        elif customization_type == 'related_bundles':
            log.info('Applying related_bundles customization')
            bundle_metadata = _get_bundle_metadata(operator_manifest, pinned_by_iib)
            _write_related_bundles_file(bundle_metadata, request_id)
        elif customization_type == 'resolve_image_pullspecs':
            log.info('Resolving image pull specs')
            bundle_metadata = _get_bundle_metadata(operator_manifest, pinned_by_iib)
            _resolve_image_pull_specs(bundle_metadata, labels, pinned_by_iib)

    return labels
Example #14
File: build.py  Project: zanssa/iib
def _create_and_push_manifest_list(request_id, arches, build_tags):
    """
    Create and push the manifest list to the configured registry.

    :param int request_id: the ID of the IIB build request
    :param iter arches: an iterable of arches to create the manifest list for
    :param list build_tags: the list of extra tags to use for the intermediate index image
    :return: the pull specification of the manifest list
    :rtype: str
    :raises IIBError: if creating or pushing the manifest list fails
    """
    buildah_manifest_cmd = ['buildah', 'manifest']
    _tags = [request_id]
    if build_tags:
        _tags += build_tags
    conf = get_worker_config()
    output_pull_specs = []
    for tag in _tags:
        output_pull_spec = conf['iib_image_push_template'].format(
            registry=conf['iib_registry'], request_id=tag
        )
        output_pull_specs.append(output_pull_spec)
        try:
            run_cmd(
                buildah_manifest_cmd + ['rm', output_pull_spec],
                exc_msg=f'Failed to remove local manifest list. {output_pull_spec} does not exist',
            )
        except IIBError as e:
            error_msg = str(e)
            if 'Manifest list not found locally.' not in error_msg:
                raise IIBError(f'Error removing local manifest list: {error_msg}')
            log.debug(
                'Manifest list cannot be removed. No manifest list %s found', output_pull_spec
            )
        log.info('Creating the manifest list %s locally', output_pull_spec)
        run_cmd(
            buildah_manifest_cmd + ['create', output_pull_spec],
            exc_msg=f'Failed to create the manifest list locally: {output_pull_spec}',
        )
        for arch in sorted(arches):
            arch_pull_spec = _get_external_arch_pull_spec(request_id, arch, include_transport=True)
            run_cmd(
                buildah_manifest_cmd + ['add', output_pull_spec, arch_pull_spec],
                exc_msg=(
                    f'Failed to add {arch_pull_spec} to the'
                    f' local manifest list: {output_pull_spec}'
                ),
            )

        log.debug('Pushing manifest list %s', output_pull_spec)
        run_cmd(
            buildah_manifest_cmd
            + [
                'push',
                '--all',
                '--format',
                'v2s2',
                output_pull_spec,
                f'docker://{output_pull_spec}',
            ],
            exc_msg=f'Failed to push the manifest list to {output_pull_spec}',
        )

    # return 1st item as it holds production tag
    return output_pull_specs[0]
Example #15
File: build.py  Project: fromanirh/iib
def handle_regenerate_bundle_request(from_bundle_image, organization,
                                     request_id):
    """
    Coordinate the work needed to regenerate the operator bundle image.

    :param str from_bundle_image: the pull specification of the bundle image to be regenerated.
    :param str organization: the name of the organization the bundle should be regenerated for.
    :param int request_id: the ID of the IIB build request
    :raises IIBError: if the regenerate bundle image build fails.
    """
    _cleanup()

    set_request_state(request_id, 'in_progress', 'Resolving from_bundle_image')
    from_bundle_image_resolved = _get_resolved_image(from_bundle_image)
    arches = _get_image_arches(from_bundle_image_resolved)
    if not arches:
        raise IIBError(
            f'No arches were found in the resolved from_bundle_image {from_bundle_image_resolved}'
        )

    arches_str = ', '.join(sorted(arches))
    log.debug(
        'Set to regenerate the bundle image for the following arches: %s',
        arches_str)

    payload = {
        'from_bundle_image_resolved':
        from_bundle_image_resolved,
        'state':
        'in_progress',
        'state_reason':
        f'Regenerating the bundle image for the following arches: {arches_str}',
    }
    exc_msg = 'Failed setting the resolved "from_bundle_image" on the request'
    update_request(request_id, payload, exc_msg=exc_msg)

    # Pull the from_bundle_image to ensure steps later on don't fail due to registry timeouts
    podman_pull(from_bundle_image_resolved)

    with tempfile.TemporaryDirectory(prefix='iib-') as temp_dir:
        manifests_path = os.path.join(temp_dir, 'manifests')
        _copy_files_from_image(from_bundle_image_resolved, '/manifests',
                               manifests_path)
        metadata_path = os.path.join(temp_dir, 'metadata')
        _copy_files_from_image(from_bundle_image_resolved, '/metadata',
                               metadata_path)
        labels = _adjust_operator_bundle(manifests_path, metadata_path,
                                         organization)

        with open(os.path.join(temp_dir, 'Dockerfile'), 'w') as dockerfile:
            dockerfile.write(
                textwrap.dedent(f"""\
                        FROM {from_bundle_image_resolved}
                        COPY ./manifests /manifests
                        COPY ./metadata /metadata
                    """))
            for name, value in labels.items():
                dockerfile.write(f'LABEL {name}={value}\n')

        for arch in sorted(arches):
            _build_image(temp_dir, 'Dockerfile', request_id, arch)
            _push_image(request_id, arch)

    set_request_state(request_id, 'in_progress', 'Creating the manifest list')
    output_pull_spec = _create_and_push_manifest_list(request_id, arches)

    conf = get_worker_config()
    if conf['iib_index_image_output_registry']:
        old_output_pull_spec = output_pull_spec
        output_pull_spec = output_pull_spec.replace(
            conf['iib_registry'], conf['iib_index_image_output_registry'], 1)
        log.info(
            'Changed the bundle_image pull specification from %s to %s',
            old_output_pull_spec,
            output_pull_spec,
        )

    payload = {
        'arches': list(arches),
        'bundle_image': output_pull_spec,
        'state': 'complete',
        'state_reason': 'The request completed successfully',
    }
    update_request(request_id,
                   payload,
                   exc_msg='Failed setting the bundle image on the request')
Example #16
File: api_v1.py  Project: zanssa/iib
def get_related_bundles(request_id):
    """
    Retrieve the related bundle images from the bundle CSV for a regenerate-bundle request.

    :param int request_id: the request ID that was passed in through the URL.
    :rtype: flask.Response
    :raise NotFound: if the request is not found or there are no related bundles for the request
    :raise Gone: if the related bundles for the build request have been removed due to expiration
    :raise ValidationError: if the request is of invalid type or is not completed yet
    """
    request_related_bundles_dir = flask.current_app.config[
        'IIB_REQUEST_RELATED_BUNDLES_DIR']
    s3_bucket_name = flask.current_app.config['IIB_AWS_S3_BUCKET_NAME']
    if not s3_bucket_name and not request_related_bundles_dir:
        raise NotFound()

    request = Request.query.get_or_404(request_id)
    if request.type != RequestTypeMapping.regenerate_bundle.value:
        raise ValidationError(
            f'The request {request_id} is of type {request.type_name}. '
            'This endpoint is only valid for requests of type regenerate-bundle.'
        )

    finalized = request.state.state_name in RequestStateMapping.get_final_states()
    if not finalized:
        raise ValidationError(
            f'The request {request_id} is not complete yet.'
            ' related_bundles will be available once the request is complete.')

    # If S3 bucket is configured, fetch the related bundles file from the S3 bucket.
    # Else, check if related bundles are stored on the system itself and return them.
    # Otherwise, raise an IIBError.
    if s3_bucket_name:
        log_file = _get_artifact_file_from_s3_bucket(
            'related_bundles',
            f'{request_id}_related_bundles.json',
            request_id,
            request.temporary_data_expiration,
            s3_bucket_name,
        )
        return flask.Response(log_file.read(), mimetype='application/json')

    related_bundles_file_path = os.path.join(
        request_related_bundles_dir, f'{request_id}_related_bundles.json')
    if not os.path.exists(related_bundles_file_path):
        expired = request.temporary_data_expiration < datetime.utcnow()
        if expired:
            raise Gone(
                f'The related_bundles for the build request {request_id} no longer exist'
            )
        if request.organization:
            raise IIBError(
                'IIB is done processing the request and cannot find related_bundles. Please make '
                f'sure the iib_organization_customizations for organization {request.organization}'
                ' has related_bundles customization type set')
        flask.current_app.logger.warning(
            'Please make sure either an S3 bucket is configured or the related bundles are'
            ' stored locally in a directory by specifying IIB_REQUEST_RELATED_BUNDLES_DIR'
        )
        raise IIBError(
            'IIB is done processing the request and could not find related_bundles.'
        )

    with open(related_bundles_file_path) as f:
        return flask.Response(f.read(), mimetype='application/json')
Example #17
def handle_merge_request(
    source_from_index,
    deprecation_list,
    request_id,
    binary_image=None,
    target_index=None,
    overwrite_target_index=False,
    overwrite_target_index_token=None,
    distribution_scope=None,
    binary_image_config=None,
    build_tags=None,
):
    """
    Coordinate the work needed to merge old (N) index image with new (N+1) index image.

    :param str source_from_index: pull specification to be used as the base for building the new
        index image.
    :param str target_index: pull specification of content stage index image for the
        corresponding target index image.
    :param list deprecation_list: list of deprecated bundles for the target index image.
    :param int request_id: the ID of the IIB build request.
    :param str binary_image: the pull specification of the container image where the opm binary
        gets copied from.
    :param bool overwrite_target_index: if True, overwrite the input ``target_index`` with
        the built index image.
    :param str overwrite_target_index_token: the token used for overwriting the input
        ``target_index`` image. This is required to use ``overwrite_target_index``.
        The token must be in the format "user:password".
    :param str distribution_scope: the scope for distribution of the index image, defaults to
        ``None``.
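    :param dict binary_image_config: the dict of config required to identify the appropriate
        ``binary_image`` to use.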
    :param list build_tags: the list of extra tags to use for the intermediate index image
    :raises IIBError: if the index image merge fails.
    """
    _cleanup()
    prebuild_info = prepare_request_for_build(
        request_id,
        RequestConfigMerge(
            _binary_image=binary_image,
            overwrite_target_index_token=overwrite_target_index_token,
            source_from_index=source_from_index,
            target_index=target_index,
            distribution_scope=distribution_scope,
            binary_image_config=binary_image_config,
        ),
    )
    _update_index_image_build_state(request_id, prebuild_info)
    source_from_index_resolved = prebuild_info['source_from_index_resolved']
    target_index_resolved = prebuild_info['target_index_resolved']
    dockerfile_name = 'index.Dockerfile'

    with tempfile.TemporaryDirectory(prefix='iib-') as temp_dir:
        with set_registry_token(overwrite_target_index_token, source_from_index):
            source_fbc = is_image_fbc(source_from_index_resolved)
            target_fbc = is_image_fbc(target_index_resolved)

        # do not remove - logging requested by stakeholders
        if source_fbc:
            log.info("Processing source index image as File-Based Catalog image")
        if target_fbc:
            log.info("Processing target index image as File-Based Catalog image")

        if source_fbc and not target_fbc:
            err_msg = (
                'Cannot merge source File-Based Catalog index image into target SQLite index image.'
            )
            log.error(err_msg)
            raise IIBError(err_msg)

        set_request_state(request_id, 'in_progress', 'Getting bundles present in the index images')
        log.info('Getting bundles present in the source index image')
        with set_registry_token(overwrite_target_index_token, source_from_index):
            source_index_bundles, source_index_bundles_pull_spec = _get_present_bundles(
                source_from_index_resolved, temp_dir
            )

            target_index_bundles = []
            if target_index:
                log.info('Getting bundles present in the target index image')
                target_index_bundles, _ = _get_present_bundles(target_index_resolved, temp_dir)

        arches = list(prebuild_info['arches'])
        arch = sorted(arches)[0]

        missing_bundles, invalid_version_bundles = _add_bundles_missing_in_source(
            source_index_bundles,
            target_index_bundles,
            temp_dir,
            prebuild_info['binary_image'],
            source_from_index_resolved,
            request_id,
            arch,
            prebuild_info['target_ocp_version'],
            overwrite_target_index_token,
            distribution_scope=prebuild_info['distribution_scope'],
        )

        missing_bundle_paths = [bundle['bundlePath'] for bundle in missing_bundles]
        if missing_bundle_paths:
            add_max_ocp_version_property(missing_bundle_paths, temp_dir)
        set_request_state(request_id, 'in_progress', 'Deprecating bundles in the deprecation list')
        log.info('Deprecating bundles in the deprecation list')
        intermediate_bundles = missing_bundle_paths + source_index_bundles_pull_spec
        deprecation_bundles = get_bundles_from_deprecation_list(
            intermediate_bundles, deprecation_list
        )
        # We do not need to pass the invalid_version_bundles through the
        # get_bundles_from_deprecation_list function because we already know
        # they are present in the newly created index.
        deprecation_bundles = deprecation_bundles + [
            bundle['bundlePath'] for bundle in invalid_version_bundles
        ]

        if deprecation_bundles:
            intermediate_image_name = _get_external_arch_pull_spec(
                request_id, arch, include_transport=False
            )

            # we can check if source index is FBC or not because intermediate_image
            # will be always the same type because it is built
            # from source index image in _add_bundles_missing_in_source()
            if source_fbc:
                deprecate_bundles_fbc(
                    bundles=deprecation_bundles,
                    base_dir=temp_dir,
                    binary_image=prebuild_info['binary_image'],
                    from_index=intermediate_image_name,
                )
            else:
                # opm can only deprecate a bundle image on an existing index image. Build and
                # push a temporary index image to satisfy this requirement. Any arch will do.
                # NOTE: we cannot use local builds because opm commands fails,
                # index image has to be pushed to registry
                _build_image(temp_dir, 'index.Dockerfile', request_id, arch)
                _push_image(request_id, arch)

                deprecate_bundles(
                    bundles=deprecation_bundles,
                    base_dir=temp_dir,
                    binary_image=prebuild_info['binary_image'],
                    from_index=intermediate_image_name,
                    overwrite_target_index_token=overwrite_target_index_token,
                )

        if target_fbc:
            index_db_file = os.path.join(temp_dir, get_worker_config()['temp_index_db_path'])
            # make sure FBC is generated right before build
            fbc_dir = opm_migrate(index_db=index_db_file, base_dir=temp_dir)
            if not source_fbc:
                # when source image is not FBC, but final image should be an FBC image
                # we have to generate Dockerfile for FBC (with hidden index.db)
                dockerfile_path = os.path.join(temp_dir, dockerfile_name)
                if os.path.isfile(dockerfile_path):
                    log.info('Removing previously generated dockerfile.')
                    os.remove(dockerfile_path)
                opm_generate_dockerfile(
                    fbc_dir=fbc_dir,
                    base_dir=temp_dir,
                    index_db=index_db_file,
                    binary_image=prebuild_info['binary_image'],
                    dockerfile_name=dockerfile_name,
                )

        _add_label_to_index(
            'com.redhat.index.delivery.version',
            prebuild_info['target_ocp_version'],
            temp_dir,
            dockerfile_name,
        )

        _add_label_to_index(
            'com.redhat.index.delivery.distribution_scope',
            prebuild_info['distribution_scope'],
            temp_dir,
            dockerfile_name,
        )

        for arch in sorted(prebuild_info['arches']):
            _build_image(temp_dir, dockerfile_name, request_id, arch)
            _push_image(request_id, arch)

        # If the container-tool podman is used in the opm commands above, opm will create temporary
        # files and directories without the write permission. This will cause the context manager
        # to fail to delete these files. Adjust the file modes to avoid this error.
        chmod_recursively(
            temp_dir,
            dir_mode=(stat.S_IRWXU | stat.S_IRWXG),
            file_mode=(stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP),
        )

    output_pull_spec = _create_and_push_manifest_list(
        request_id, prebuild_info['arches'], build_tags
    )
    _update_index_image_pull_spec(
        output_pull_spec,
        request_id,
        prebuild_info['arches'],
        target_index,
        overwrite_target_index,
        overwrite_target_index_token,
        target_index_resolved,
    )
    set_request_state(
        request_id, 'complete', 'The index image was successfully cleaned and updated.'
    )
Example #18
def opm_generate_dockerfile(fbc_dir,
                            base_dir,
                            index_db,
                            binary_image,
                            dockerfile_name=None):
    """
    Generate a Dockerfile using the opm command and add index.db to a hidden location.

    :param str fbc_dir: directory containing the file-based catalog (JSON or YAML files).
    :param str base_dir: base directory where the Dockerfile should be created.
    :param str index_db: path to the SQLite index.db to be placed at a hidden location in the
        container.
    :param str binary_image: pull spec of the binary image in which to build the catalog.
    :param str dockerfile_name: name of the generated Dockerfile.
    :return: path to the generated Dockerfile
    :rtype: str
    :raises IIBError: if the Dockerfile was not generated
    """
    from iib.workers.tasks.utils import run_cmd

    # we do not want to continue if Dockerfile already exists
    dockerfile_name_opm_default = f"{os.path.basename(fbc_dir)}.Dockerfile"
    tmp_dockerfile_name = dockerfile_name or dockerfile_name_opm_default
    dockerfile_path = os.path.join(base_dir, tmp_dockerfile_name)

    if os.path.isfile(dockerfile_path):
        log.info(
            "Skipping generation of Dockerfile. "
            "Dockerfile for file-based catalog already exists at %s",
            dockerfile_path,
        )
        return dockerfile_path

    cmd = [
        'opm',
        'alpha',
        'generate',
        'dockerfile',
        os.path.abspath(fbc_dir),
        '--binary-image',
        binary_image,
    ]

    log.info('Generating Dockerfile with binary image %s', binary_image)
    run_cmd(cmd, {'cwd': base_dir},
            exc_msg='Failed to generate Dockerfile for file-based catalog')

    # check if opm command generated Dockerfile successfully
    dockerfile_path_opm_default = os.path.join(base_dir,
                                               dockerfile_name_opm_default)
    if not os.path.isfile(dockerfile_path_opm_default):
        error_msg = f"Cannot find generated Dockerfile at {dockerfile_path_opm_default}"
        log.error(error_msg)
        raise IIBError(error_msg)

    # we should rename Dockerfile generated by opm if `dockerfile_name` parameter is set
    if dockerfile_name:
        if os.path.exists(dockerfile_path):
            log.info('Rewriting Dockerfile %s with the one newly generated by opm.',
                     dockerfile_path)
        os.rename(dockerfile_path_opm_default, dockerfile_path)

    db_path = get_worker_config()['hidden_index_db_path']
    rel_path_index_db = os.path.relpath(index_db, base_dir)
    with open(dockerfile_path, 'a') as f:
        f.write(f'\nADD {rel_path_index_db} {db_path}\n')

    log.info("Dockerfile was successfully generated.")
    return dockerfile_path
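
A hedged usage sketch of the helper above; all paths and the pull spec are placeholders, opm must be on PATH, and the function is assumed to be in scope (the excerpt does not name its module).

dockerfile_path = opm_generate_dockerfile(
    fbc_dir='/tmp/iib-request-1/catalog',
    base_dir='/tmp/iib-request-1',
    index_db='/tmp/iib-request-1/database/index.db',
    binary_image='registry.example.com/ose-operator-registry:v4.10',
    dockerfile_name='index.Dockerfile',
)
# The generated Dockerfile ends with a line like:
#   ADD database/index.db <hidden_index_db_path from the worker config>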
Example #19
# SPDX-License-Identifier: GPL-3.0-or-later
from unittest import mock

import pytest

from iib.exceptions import IIBError
from iib.workers.tasks import general


@pytest.mark.parametrize(
    'exc, expected_msg',
    (
        (IIBError('Is it lunch time yet?'), 'Is it lunch time yet?'),
        (RuntimeError('I cannot run in the rain!'),
         'An unknown error occurred'),
    ),
)
@mock.patch('iib.workers.tasks.general.set_request_state')
def test_failed_request_callback(mock_srs, exc, expected_msg):
    general.failed_request_callback(None, exc, None, 3)
    mock_srs.assert_called_once_with(3, 'failed', expected_msg)
Example #20
def _func_to_retry():
    mock_func()
    raise IIBError('Some error')
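
A self-contained sketch of how a fragment like this is typically exercised: the mock records each attempt while the raised IIBError drives the retry loop. The _retry_n_times helper below is hypothetical and stands in for whatever retry decorator the real test uses.

from unittest import mock

import pytest

from iib.exceptions import IIBError

mock_func = mock.Mock()


def _func_to_retry():
    mock_func()
    raise IIBError('Some error')


def _retry_n_times(func, attempts=3):
    # Hypothetical retry helper: re-invoke func until it succeeds or attempts run out
    for attempt in range(attempts):
        try:
            return func()
        except IIBError:
            if attempt == attempts - 1:
                raise


with pytest.raises(IIBError, match='Some error'):
    _retry_n_times(_func_to_retry)

assert mock_func.call_count == 3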
Example #21
File: build.py  Project: fromanirh/iib
def _prepare_request_for_build(
    binary_image,
    request_id,
    from_index=None,
    overwrite_from_index_token=None,
    add_arches=None,
    bundles=None,
):
    """
    Prepare the request for the index image build.

    All information that was retrieved and/or calculated for the next steps in the build are
    returned as a dictionary.

    This function was created so that code didn't need to be duplicated for the ``add`` and ``rm``
    request types.

    :param str binary_image: the pull specification of the container image where the opm binary
        gets copied from.
    :param int request_id: the ID of the IIB build request
    :param str from_index: the pull specification of the container image containing the index that
        the index image build will be based from.
    :param str overwrite_from_index_token: the token used for overwriting the input
        ``from_index`` image. This is required for non-privileged users to use
        ``overwrite_from_index``. The token must be in the format "user:password".
    :param list add_arches: the list of arches to build in addition to the arches ``from_index`` is
        currently built for; if ``from_index`` is ``None``, then this is used as the list of arches
        to build the index image for
    :param list bundles: the list of bundles to create the bundle mapping on the request
    :return: a dictionary with the keys: arches, binary_image_resolved, from_index_resolved, and
        ocp_version.
    :raises IIBError: if the container image resolution fails or the architectures couldn't be
        detected.
    """
    if bundles is None:
        bundles = []

    set_request_state(request_id, 'in_progress',
                      'Resolving the container images')

    if add_arches:
        arches = set(add_arches)
    else:
        arches = set()

    binary_image_resolved = _get_resolved_image(binary_image)
    binary_image_arches = _get_image_arches(binary_image_resolved)

    if from_index:
        with set_registry_token(overwrite_from_index_token, from_index):
            from_index_resolved = _get_resolved_image(from_index)
            from_index_arches = _get_image_arches(from_index_resolved)
            ocp_version = (
                get_image_label(from_index_resolved, 'com.redhat.index.delivery.version')
                or 'v4.5'
            )
        arches = arches | from_index_arches
    else:
        from_index_resolved = None
        ocp_version = 'v4.5'

    if not arches:
        raise IIBError('No arches were provided to build the index image')

    arches_str = ', '.join(sorted(arches))
    log.debug('Set to build the index image for the following arches: %s',
              arches_str)

    if not arches.issubset(binary_image_arches):
        raise IIBError(
            'The binary image is not available for the following arches: {}'.format(
                ', '.join(sorted(arches - binary_image_arches))
            )
        )

    bundle_mapping = {}
    for bundle in bundles:
        operator = get_image_label(
            bundle, 'operators.operatorframework.io.bundle.package.v1')
        if operator:
            bundle_mapping.setdefault(operator, []).append(bundle)

    return {
        'arches': arches,
        'binary_image_resolved': binary_image_resolved,
        'bundle_mapping': bundle_mapping,
        'from_index_resolved': from_index_resolved,
        'ocp_version': ocp_version,
    }
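The bundle_mapping returned above simply groups bundle pull specs by the operator package named in their labels. A small, self-contained illustration with a stubbed label lookup and made-up pull specs:

def get_image_label_stub(pull_spec, label):
    # Stand-in for get_image_label; IIB reads this label from the bundle image itself.
    fake_labels = {
        'quay.io/example/etcd-bundle:1.0': 'etcd-operator',
        'quay.io/example/etcd-bundle:1.1': 'etcd-operator',
        'quay.io/example/prometheus-bundle:2.0': 'prometheus-operator',
    }
    return fake_labels.get(pull_spec)


bundles = [
    'quay.io/example/etcd-bundle:1.0',
    'quay.io/example/etcd-bundle:1.1',
    'quay.io/example/prometheus-bundle:2.0',
]

bundle_mapping = {}
for bundle in bundles:
    operator = get_image_label_stub(bundle, 'operators.operatorframework.io.bundle.package.v1')
    if operator:
        bundle_mapping.setdefault(operator, []).append(bundle)

# {'etcd-operator': ['quay.io/example/etcd-bundle:1.0', 'quay.io/example/etcd-bundle:1.1'],
#  'prometheus-operator': ['quay.io/example/prometheus-bundle:2.0']}
print(bundle_mapping)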
Example #22
0
def prepare_request_for_build(request_id, build_request_config):
    """Prepare the request for the index image build.

    All information that was retrieved and/or calculated for the next steps in the build are
    returned as a dictionary.
    This function was created so that code didn't need to be duplicated for the ``add`` and ``rm``
    request types.
    :param RequestConfig build_request_config: build request configuration
    :return: a dictionary with the keys: arches, binary_image_resolved, from_index_resolved, and
    ocp_version.
    :rtype: dict
    :raises IIBError: if the container image resolution fails or the architectures couldn't be
    detected.
    """
    bundles = None
    if hasattr(build_request_config, "bundles"):
        bundles = build_request_config.bundles

    if bundles is None:
        bundles = []

    set_request_state(request_id, 'in_progress', 'Resolving the container images')

    # Default the ocp_version to v4.5 for from_index and source_from_index, and to v4.6 for target_index
    index_info = get_all_index_images_info(
        build_request_config,
        [("from_index", "v4.5"), ("source_from_index", "v4.5"), ("target_index", "v4.6")],
    )
    arches = gather_index_image_arches(build_request_config, index_info)
    if not arches:
        raise IIBError('No arches were provided to build the index image')

    arches_str = ', '.join(sorted(arches))
    log.debug('Set to build the index image for the following arches: %s', arches_str)

    # Use the distribution_scope of the from_index as the resolved distribution scope for `add`
    # and `rm` requests, but use the distribution_scope of the target_index as the resolved
    # distribution scope for `merge-index-image` requests.
    resolved_distribution_scope = index_info['from_index']['resolved_distribution_scope']
    if (
        hasattr(build_request_config, "source_from_index")
        and build_request_config.source_from_index
    ):
        resolved_distribution_scope = index_info['target_index']['resolved_distribution_scope']

    distribution_scope = _validate_distribution_scope(
        resolved_distribution_scope, build_request_config.distribution_scope
    )

    binary_image = build_request_config.binary_image(index_info['from_index'], distribution_scope)

    binary_image_resolved = get_resolved_image(binary_image)
    binary_image_arches = get_image_arches(binary_image_resolved)

    if not arches.issubset(binary_image_arches):
        raise IIBError(
            'The binary image is not available for the following arches: {}'.format(
                ', '.join(sorted(arches - binary_image_arches))
            )
        )

    bundle_mapping = {}
    for bundle in bundles:
        operator = get_image_label(bundle, 'operators.operatorframework.io.bundle.package.v1')
        if operator:
            bundle_mapping.setdefault(operator, []).append(bundle)

    return {
        'arches': arches,
        'binary_image': binary_image,
        'binary_image_resolved': binary_image_resolved,
        'bundle_mapping': bundle_mapping,
        'from_index_resolved': index_info["from_index"]['resolved_from_index'],
        'ocp_version': index_info["from_index"]['ocp_version'],
        'distribution_scope': distribution_scope,
        'source_from_index_resolved': index_info['source_from_index']['resolved_from_index'],
        'source_ocp_version': index_info['source_from_index']['ocp_version'],
        'target_index_resolved': index_info['target_index']['resolved_from_index'],
        'target_ocp_version': index_info['target_index']['ocp_version'],
    }
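To make the scope selection above concrete, the following sketch models the two request flavours with plain namespaces (the pull specs and scope values are invented): add/rm requests validate against the from_index scope, while merge-index-image requests validate against the target_index scope.

from types import SimpleNamespace


def pick_resolved_scope(build_request_config, index_info):
    # Merge requests carry a source_from_index, so they validate against the target_index scope;
    # add/rm requests validate against the from_index scope.
    if getattr(build_request_config, 'source_from_index', None):
        return index_info['target_index']['resolved_distribution_scope']
    return index_info['from_index']['resolved_distribution_scope']


index_info = {
    'from_index': {'resolved_distribution_scope': 'prod'},
    'target_index': {'resolved_distribution_scope': 'stage'},
}

add_config = SimpleNamespace(source_from_index=None)
merge_config = SimpleNamespace(source_from_index='registry.example.com/source-index:v4.5')

assert pick_resolved_scope(add_config, index_info) == 'prod'
assert pick_resolved_scope(merge_config, index_info) == 'stage'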
Example #23
0
def handle_create_empty_index_request(
    from_index,
    request_id,
    output_fbc=False,
    binary_image=None,
    labels=None,
    binary_image_config=None,
):
    """Coordinate the the work needed to create the index image with labels.

    :param str from_index: the pull specification of the container image containing the index that
        the index image build will be based from.
    :param int request_id: the ID of the IIB build request
    :param bool output_fbc: specifies whether a File-based Catalog index image should be created
    :param str binary_image: the pull specification of the container image where the opm binary
        gets copied from.
    :param dict labels: the dict of labels required to be added to a new index image
    :param dict binary_image_config: the dict of config required to identify the appropriate
        ``binary_image`` to use.
    """
    _cleanup()
    prebuild_info = prepare_request_for_build(
        request_id,
        RequestConfigCreateIndexImage(
            _binary_image=binary_image,
            from_index=from_index,
            binary_image_config=binary_image_config,
        ),
    )
    from_index_resolved = prebuild_info['from_index_resolved']
    prebuild_info['labels'] = labels

    if not output_fbc and is_image_fbc(from_index_resolved):
        log.debug('%s is FBC index image', from_index_resolved)
        err_msg = 'Cannot create SQLite index image from File-Based Catalog index image'
        log.error(err_msg)
        raise IIBError(err_msg)

    _update_index_image_build_state(request_id, prebuild_info)

    with tempfile.TemporaryDirectory(prefix='iib-') as temp_dir:
        set_request_state(request_id, 'in_progress', 'Checking operators present in index image')

        operators = _get_present_operators(from_index_resolved, temp_dir)

        # if output_fbc parameter is true, create an empty FBC index image
        # else create empty SQLite index image
        if output_fbc:
            log.debug('Creating empty FBC index image from %s', from_index)
            opm_create_empty_fbc(
                request_id=request_id,
                temp_dir=temp_dir,
                from_index_resolved=from_index_resolved,
                from_index=from_index,
                binary_image=prebuild_info['binary_image'],
                operators=operators,
            )
        else:
            set_request_state(request_id, 'in_progress', 'Removing operators from index image')
            _opm_index_rm(temp_dir, operators, prebuild_info['binary_image'], from_index_resolved)

        set_request_state(
            request_id, 'in_progress', 'Getting and updating labels for new index image'
        )

        iib_labels = {
            'com.redhat.index.delivery.version': prebuild_info['ocp_version'],
            'com.redhat.index.delivery.distribution_scope': prebuild_info['distribution_scope'],
        }

        if labels:
            iib_labels.update(labels)
        for index_label, value in iib_labels.items():
            _add_label_to_index(index_label, value, temp_dir, 'index.Dockerfile')

        arches = prebuild_info['arches']

        for arch in sorted(arches):
            _build_image(temp_dir, 'index.Dockerfile', request_id, arch)
            _push_image(request_id, arch)

    set_request_state(request_id, 'in_progress', 'Creating the manifest list')
    output_pull_spec = _create_and_push_manifest_list(request_id, arches, [])

    _update_index_image_pull_spec(
        output_pull_spec=output_pull_spec,
        request_id=request_id,
        arches=arches,
        from_index=from_index,
        resolved_prebuild_from_index=from_index_resolved,
    )
    set_request_state(request_id, 'complete', 'The empty index image was successfully created')
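For reference, a short illustration of the labels applied to the new empty index image. The version, scope, and extra label below are hypothetical; user-supplied labels take precedence on key conflicts.

# All values below are hypothetical; they mirror the iib_labels construction above.
prebuild_info = {'ocp_version': 'v4.9', 'distribution_scope': 'prod'}
labels = {'maintainer': 'someone@example.com'}

iib_labels = {
    'com.redhat.index.delivery.version': prebuild_info['ocp_version'],
    'com.redhat.index.delivery.distribution_scope': prebuild_info['distribution_scope'],
}
if labels:
    iib_labels.update(labels)

print(iib_labels)
# {'com.redhat.index.delivery.version': 'v4.9',
#  'com.redhat.index.delivery.distribution_scope': 'prod',
#  'maintainer': 'someone@example.com'}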
Example #24
0
def test_create_and_push_manifest_list(mock_open, mock_run_cmd, mock_td,
                                       tmp_path):
    mock_td.return_value.__enter__.return_value = tmp_path
    # The first `buildah manifest rm` fails because no local manifest list exists yet;
    # all subsequent commands succeed.
    mock_run_cmd.side_effect = [IIBError('Manifest list not found locally.')] + [None] * 10

    output = []
    mock_open().__enter__().write.side_effect = lambda x: output.append(x)
    build._create_and_push_manifest_list(3, {'amd64', 's390x'},
                                         ['extra_build_tag1'])

    expected_calls = [
        mock.call(
            ['buildah', 'manifest', 'rm', 'registry:8443/iib-build:3'],
            exc_msg=
            ('Failed to remove local manifest list. registry:8443/iib-build:3 does not exist'
             ),
        ),
        mock.call(
            ['buildah', 'manifest', 'create', 'registry:8443/iib-build:3'],
            exc_msg=
            'Failed to create the manifest list locally: registry:8443/iib-build:3',
        ),
        mock.call(
            [
                'buildah',
                'manifest',
                'add',
                'registry:8443/iib-build:3',
                'docker://registry:8443/iib-build:3-amd64',
            ],
            exc_msg=('Failed to add docker://registry:8443/iib-build:3-amd64'
                     ' to the local manifest list: registry:8443/iib-build:3'),
        ),
        mock.call(
            [
                'buildah',
                'manifest',
                'add',
                'registry:8443/iib-build:3',
                'docker://registry:8443/iib-build:3-s390x',
            ],
            exc_msg=('Failed to add docker://registry:8443/iib-build:3-s390x'
                     ' to the local manifest list: registry:8443/iib-build:3'),
        ),
        mock.call(
            [
                'buildah',
                'manifest',
                'push',
                '--all',
                '--format',
                'v2s2',
                'registry:8443/iib-build:3',
                'docker://registry:8443/iib-build:3',
            ],
            exc_msg=
            'Failed to push the manifest list to registry:8443/iib-build:3',
        ),
        mock.call(
            [
                'buildah', 'manifest', 'rm',
                'registry:8443/iib-build:extra_build_tag1'
            ],
            exc_msg='Failed to remove local manifest list. '
            'registry:8443/iib-build:extra_build_tag1 does not exist',
        ),
        mock.call(
            [
                'buildah', 'manifest', 'create',
                'registry:8443/iib-build:extra_build_tag1'
            ],
            exc_msg='Failed to create the manifest list locally: '
            'registry:8443/iib-build:extra_build_tag1',
        ),
        mock.call(
            [
                'buildah',
                'manifest',
                'add',
                'registry:8443/iib-build:extra_build_tag1',
                'docker://registry:8443/iib-build:3-amd64',
            ],
            exc_msg=
            ('Failed to add docker://registry:8443/iib-build:3-amd64'
             ' to the local manifest list: registry:8443/iib-build:extra_build_tag1'
             ),
        ),
        mock.call(
            [
                'buildah',
                'manifest',
                'add',
                'registry:8443/iib-build:extra_build_tag1',
                'docker://registry:8443/iib-build:3-s390x',
            ],
            exc_msg=
            ('Failed to add docker://registry:8443/iib-build:3-s390x'
             ' to the local manifest list: registry:8443/iib-build:extra_build_tag1'
             ),
        ),
        mock.call(
            [
                'buildah',
                'manifest',
                'push',
                '--all',
                '--format',
                'v2s2',
                'registry:8443/iib-build:extra_build_tag1',
                'docker://registry:8443/iib-build:extra_build_tag1',
            ],
            exc_msg=
            'Failed to push the manifest list to registry:8443/iib-build:extra_build_tag1',
        ),
    ]
    assert mock_run_cmd.call_args_list == expected_calls
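Read together, the expected calls encode the manifest-list workflow: remove any stale local list, create an empty one, add each per-arch image, and push it, once per output tag. A hedged sketch of that sequence with a print-only stand-in for run_cmd; the real _create_and_push_manifest_list also tolerates the initial rm failing when no local list exists and repeats the loop for every extra build tag.

def run_cmd_stub(cmd, exc_msg=None):
    # Stand-in for IIB's run_cmd helper; it only prints the command here.
    print(' '.join(cmd))


def push_manifest_list_sketch(manifest_list, arches, per_arch_base):
    # Remove a possibly stale local manifest list, then build and push a fresh one.
    run_cmd_stub(['buildah', 'manifest', 'rm', manifest_list])
    run_cmd_stub(['buildah', 'manifest', 'create', manifest_list])
    for arch in sorted(arches):
        run_cmd_stub(
            ['buildah', 'manifest', 'add', manifest_list, f'docker://{per_arch_base}-{arch}']
        )
    run_cmd_stub(
        ['buildah', 'manifest', 'push', '--all', '--format', 'v2s2',
         manifest_list, f'docker://{manifest_list}']
    )


push_manifest_list_sketch('registry:8443/iib-build:3', {'amd64', 's390x'}, 'registry:8443/iib-build:3')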