def test_get_koji_session(self, config, raise_error):
    # Minimal required sections so the reactor config passes schema
    # validation; the koji-specific part comes in through the `config`
    # fixture parameter prepended to this.
    required_config = """\
version: 1
source_registry:
  url: source_registry.com
registries:
  - url: registry_url
openshift:
  url: openshift_url
"""
    config += "\n" + required_config

    # When the fixture marks the config as invalid, only assert that
    # schema validation rejects it and stop.
    if raise_error:
        with pytest.raises(Exception):
            read_yaml(config, 'schemas/config.json')
        return

    config_json = read_yaml(config, 'schemas/config.json')

    # Expected auth settings passed through to create_koji_session; note
    # that krb_keytab is sourced from the 'krb_keytab_path' config key.
    auth_info = {
        "proxyuser": config_json['koji']['auth'].get('proxyuser'),
        "ssl_certs_dir": config_json['koji']['auth'].get('ssl_certs_dir'),
        "krb_principal": config_json['koji']['auth'].get('krb_principal'),
        "krb_keytab": config_json['koji']['auth'].get('krb_keytab_path')
    }

    # use_fast_upload defaults to True when absent from the config.
    use_fast_upload = config_json['koji'].get('use_fast_upload', True)

    conf = Configuration(raw_config=config_json)

    # Verify get_koji_session forwards hub URL, auth info and the
    # fast-upload flag exactly once to the session factory.
    (flexmock(atomic_reactor.utils.koji)
        .should_receive('create_koji_session')
        .with_args(config_json['koji']['hub_url'], auth_info, use_fast_upload)
        .once()
        .and_return(True))

    get_koji_session(conf)
def source_get_unique_image(self) -> ImageName:
    """Build a uniquely-tagged ImageName for the source container image.

    Looks up the koji build recorded by the fetch-sources plugin, takes the
    first pull spec of its image index, re-tags it with a freshly generated
    ``target-random-timestamp-arch`` tag, encloses it in the configured
    organization (when set) and points it at the configured registry.

    :return: ImageName, the unique image reference
    """
    fetch_result = self.workflow.data.prebuild_results[PLUGIN_FETCH_SOURCES_KEY]
    build_id = fetch_result['sources_for_koji_build_id']
    session = get_koji_session(self.workflow.conf)

    stamp = osbs.utils.utcnow().strftime('%Y%m%d%H%M%S')
    random.seed()
    arch = platform.processor() or 'x86_64'

    # <target>-<RAND_DIGITS-digit random>-<timestamp>-<arch>
    random_part = str(random.randrange(10**(RAND_DIGITS - 1), 10**RAND_DIGITS))
    unique_tag = '-'.join([self.koji_target or 'none', random_part, stamp, arch])

    build_meta = session.getBuild(build_id)
    pull_specs = build_meta['extra']['image']['index']['pull']

    image = ImageName.parse(pull_specs[0])
    image.tag = unique_tag

    org = self.workflow.conf.registries_organization
    if org:
        image.enclose(org)
    image.registry = self.workflow.conf.registry['uri']
    return image
def resolve_modules(self, modules) -> Dict[str, ModuleInfo]:
    """Resolve module specs against koji into ModuleInfo objects.

    For each spec, fetch the koji module build and its RPM list, format the
    RPM NEVRAs, parse the embedded modulemd document and normalize it to
    stream version 2.

    :param modules: iterable of module specs (objects with name, stream,
                    version attributes)
    :return: dict mapping module name to its ModuleInfo
    """
    koji_session = get_koji_session(self.workflow_config)

    resolved_modules = {}
    for module_spec in modules:
        build, rpm_list = get_koji_module_build(koji_session, module_spec)

        # The returned RPM list contains source RPMs and RPMs for all
        # architectures.
        rpms = [
            # A missing/None epoch is normalized to 0 in the NEVRA string.
            '{name}-{epochnum}:{version}-{release}.{arch}.rpm'.format(
                epochnum=rpm['epoch'] or 0, **rpm)
            for rpm in rpm_list
        ]

        # strict=False - don't break if new fields are added
        mmd = Modulemd.ModuleStream.read_string(
            build['extra']['typeinfo']['module']['modulemd_str'],
            strict=False)
        # Make sure we have a version 2 modulemd file
        mmd = mmd.upgrade(Modulemd.ModuleStreamVersionEnum.TWO)

        resolved_modules[module_spec.name] = ModuleInfo(module_spec.name,
                                                        module_spec.stream,
                                                        module_spec.version,
                                                        mmd, rpms)
    return resolved_modules
def add_koji_repo(self):
    """Create a yum repo for the koji build target and queue it per platform.

    Resolves the build target to its build tag, finds the tag's current
    repo, renders a yum repo file pointing at its koji path, and appends it
    to ``self.yum_repos`` for every platform.

    Returns silently when no target is configured or when tag/repo info
    cannot be retrieved; raises RuntimeError for a nonexistent target.
    """
    xmlrpc = get_koji_session(self.workflow.conf)
    pathinfo = self.workflow.conf.koji_path_info
    proxy = self.workflow.conf.yum_proxy

    if not self.target:
        self.log.info('no target provided, not adding koji repo')
        return

    target_info = xmlrpc.getBuildTarget(self.target)
    if target_info is None:
        self.log.error("provided target '%s' doesn't exist", self.target)
        raise RuntimeError("Provided target '%s' doesn't exist!" % self.target)
    tag_info = xmlrpc.getTag(target_info['build_tag_name'])

    if not tag_info or 'name' not in tag_info:
        self.log.warning("No tag info was retrieved")
        return

    repo_info = xmlrpc.getRepo(tag_info['id'])

    if not repo_info or 'id' not in repo_info:
        self.log.warning("No repo info was retrieved")
        return

    # to use urljoin, we would have to append '/', so let's append everything
    baseurl = pathinfo.repo(repo_info['id'], tag_info['name']) + "/$basearch"

    self.log.info("baseurl = '%s'", baseurl)

    repo = {
        'name': 'atomic-reactor-koji-plugin-%s' % self.target,
        'baseurl': baseurl,
        'enabled': 1,
        'gpgcheck': 0,
    }

    # yum doesn't accept a certificate path in sslcacert - it requires a db with added cert
    # dnf ignores that option completely
    # we have to fall back to sslverify=0 everytime we get https repo from brew so we'll surely
    # be able to pull from it
    if baseurl.startswith("https://"):
        self.log.info("Ignoring certificates in the repo")
        repo['sslverify'] = 0

    if proxy:
        self.log.info("Setting yum proxy to %s", proxy)
        repo['proxy'] = proxy

    yum_repo = YumRepo(os.path.join(YUM_REPOS_DIR, self.target))
    path = yum_repo.dst_filename
    self.log.info("yum repo of koji target: '%s'", path)
    yum_repo.content = render_yum_repo(repo, escape_dollars=False)
    for platform in self.platforms:
        self.yum_repos[platform].append(yum_repo)
def get_koji_user(self):
    """Return the koji user name that owns the current koji task.

    Falls back to the configured 'unknown_user' value when the task ID is
    missing or invalid, or when the owner record has no name.
    """
    fallback = self.workflow.conf.cachito.get('unknown_user', 'unknown_user')

    try:
        task_id = int(self.workflow.user_params.get('koji_task_id'))
    except (ValueError, TypeError, AttributeError):
        # Covers a non-numeric value, a missing value (None), and absent
        # user_params respectively.
        self.log.warning('Unable to get koji user: Invalid Koji task ID')
        return fallback

    session = get_koji_session(self.workflow.conf)
    owner = get_koji_task_owner(session, task_id)
    return owner.get('name', fallback)
def run(self): """ Run the plugin. """ # get the session and token information in case we need to refund a failed build self.session = get_koji_session(self.workflow.conf) server_dir = self.get_server_dir() koji_metadata = self.combine_metadata_fragments() if is_scratch_build(self.workflow): self.upload_scratch_metadata(koji_metadata, server_dir) return # for all builds which have koji task if self.koji_task_id: task_info = self.session.getTaskInfo(self.koji_task_id) task_state = koji.TASK_STATES[task_info['state']] if task_state != 'OPEN': self.log.error( "Koji task is not in Open state, but in %s, not importing build", task_state) return self._upload_output_files(server_dir) build_token = self.workflow.data.reserved_token build_id = self.workflow.data.reserved_build_id if build_id is not None and build_token is not None: koji_metadata['build']['build_id'] = build_id try: if build_token: build_info = self.session.CGImport(koji_metadata, server_dir, token=build_token) else: build_info = self.session.CGImport(koji_metadata, server_dir) except Exception: self.log.debug("metadata: %r", koji_metadata) raise # Older versions of CGImport do not return a value. build_id = build_info.get("id") if build_info else None self.log.debug("Build information: %s", json.dumps(build_info, sort_keys=True, indent=4)) return build_id
def run(self):
    """Refund (cancel) a reserved koji build after a failed/cancelled build.

    No-op when the build-reservation feature is disabled, when nothing was
    reserved, or when the reserved build has already left the BUILDING
    state. If the pipeline actually succeeded while the reservation is
    still open, the reservation is refunded as FAILED and a RuntimeError
    is raised, since koji_import should have consumed it.
    """
    if not self.workflow.conf.koji.get('reserve_build', False):
        self.log.debug("Build reservation feature is not enabled. Skip cancelation.")
        return

    reserved_token = self.workflow.data.reserved_token
    reserved_build_id = self.workflow.data.reserved_build_id

    if reserved_token is None and reserved_build_id is None:
        self.log.debug("There is no reserved build. Skip cancelation.")
        return

    session = get_koji_session(self.workflow.conf)
    build_info = session.getBuild(reserved_build_id)
    if build_info is None:
        self.log.warning("Cannot get the reserved build %s from Brew/Koji.",
                         reserved_build_id)
        return

    state_building = koji.BUILD_STATES["BUILDING"]
    state_failed = koji.BUILD_STATES["FAILED"]
    cur_state = build_info["state"]
    cur_state_name = koji.BUILD_STATES[cur_state]

    # Only an open (BUILDING) reservation can be refunded.
    if cur_state != state_building:
        self.log.debug("Reserved build %s is in state %s already. Skip cancelation.",
                       reserved_build_id, cur_state_name)
        return

    failed, cancelled = self.workflow.check_build_outcome()

    if not failed and cur_state == state_building:
        # Successful build but the reservation was never consumed:
        # refund it as FAILED and surface a configuration error.
        session.CGRefundBuild(PROG, reserved_build_id, reserved_token, state_failed)
        err_msg = (
            f"Build process succeeds, but the reserved build {reserved_build_id} "
            f"is in state {cur_state_name}. "
            f"Please check if koji_import plugin is configured properly to execute."
        )
        raise RuntimeError(err_msg)

    if cancelled:
        state = koji.BUILD_STATES["CANCELED"]
    else:
        state = state_failed
    session.CGRefundBuild(PROG, reserved_build_id, reserved_token, state)
def __init__(self, workflow, append=False):
    """
    constructor

    :param workflow: DockerBuildWorkflow instance
    :param append: if True, the release will be obtained by appending a
                   '.' and a unique integer to the release label in the
                   dockerfile.
    """
    super(BumpReleasePlugin, self).__init__(workflow)
    self.append = append
    self.xmlrpc = get_koji_session(self.workflow.conf)
    # Whether a koji build NVR should be reserved up-front (off by default).
    self.reserve_build = self.workflow.conf.koji.get('reserve_build', False)
def __init__(self, workflow, koji_parent_build=None):
    """
    :param workflow: DockerBuildWorkflow instance
    :param koji_parent_build: str, either Koji build ID or Koji build NVR
    """
    super(InjectParentImage, self).__init__(workflow)
    self.koji_session = get_koji_session(self.workflow.conf)

    # Accept either a numeric build ID or an NVR string: coerce to int when
    # possible, otherwise keep the value as provided.
    try:
        koji_parent_build = int(koji_parent_build)
    except (ValueError, TypeError):
        pass
    self.koji_parent_build = koji_parent_build

    # Resolved lazily while the plugin runs.
    self._koji_parent_build_info = None
    self._repositories = None
    self._new_parent_image = None
def run(self):
    """Create the base filesystem via a koji image-build task and inject it.

    Does nothing for non-custom parent images. Otherwise refreshes repos
    from composes, starts the koji image task, and injects the resulting
    filesystem into every platform's build dir.

    :return: dict with the koji task id, or None when skipped
    """
    if not self.workflow.data.dockerfile_images.custom_parent_image:
        self.log.info('Nothing to do for non-custom base images')
        return

    self.update_repos_from_composes()
    build_conf = self.get_image_build_conf()

    self.session = get_koji_session(self.workflow.conf)
    task_id, image_name = self.run_image_task(build_conf)

    self.workflow.build_dir.for_each_platform(
        functools.partial(self.inject_filesystem, task_id, image_name))

    return {'filesystem-koji-task-id': task_id}
def run(self):
    """
    Tag the imported koji build into the configured target's destination tag.

    Skipped for scratch builds, when no koji target is configured, or when
    the import plugin produced no build.

    :return: str or None, the tag applied to the build
    """
    if is_scratch_build(self.workflow):
        self.log.info('scratch build, skipping plugin')
        return

    if not self.target:
        self.log.info('no koji target provided, skipping plugin')
        return

    build_id = self.workflow.data.postbuild_results.get(KojiImportPlugin.key)
    if not build_id:
        self.log.info('No koji build from %s', KojiImportPlugin.key)
        return

    session = get_koji_session(self.workflow.conf)
    return tag_koji_build(session, build_id, self.target,
                          poll_interval=self.poll_interval)
def run(self):
    """Resolve and download all requested pre-built artifacts.

    Collects artifact requests from the koji-NVR, PNC and plain-URL fetch
    files, downloads everything into each platform's build dir, and returns
    the gathered metadata for later plugins.
    """
    self.session = get_koji_session(self.workflow.conf)

    nvr_requests = [NvrRequest(**request)
                    for request in util.read_fetch_artifacts_koji(self.workflow) or []]
    pnc_requests = util.read_fetch_artifacts_pnc(self.workflow) or {}
    url_requests = util.read_fetch_artifacts_url(self.workflow) or []

    components, nvr_download_queue = self.process_by_nvr(nvr_requests)
    url_download_queue, source_download_queue = self.process_by_url(url_requests)
    pnc_artifact_ids, pnc_download_queue, pnc_build_metadata = \
        self.process_pnc_requests(pnc_requests)

    download_queue = pnc_download_queue + nvr_download_queue + url_download_queue

    self.workflow.build_dir.for_all_platforms_copy(
        functools.partial(self.download_files, download_queue))

    return {
        'components': components,
        'download_queue': [dataclasses.asdict(item) for item in download_queue],
        'no_source': self.no_source_artifacts,
        'pnc_artifact_ids': pnc_artifact_ids,
        'pnc_build_metadata': pnc_build_metadata,
        'source_download_queue': source_download_queue,
        'source_url_to_artifacts': self.source_url_to_artifacts,
    }
def __init__(
    self,
    workflow,
    koji_build_id=None,
    koji_build_nvr=None,
    signing_intent=None,
):
    """
    :param workflow: DockerBuildWorkflow instance
    :param koji_build_id: int, container image koji build id
    :param koji_build_nvr: str, container image koji build NVR
    :param signing_intent: str, ODCS signing intent name
    :raises TypeError: when neither build identifier is given, or when a
                       given identifier has the wrong type
    """
    # At least one way to identify the container build is mandatory.
    if not koji_build_id and not koji_build_nvr:
        err_msg = ('{} expects either koji_build_id or koji_build_nvr to be defined'
                   .format(self.__class__.__name__))
        raise TypeError(err_msg)
    type_errors = []
    if koji_build_id is not None and not isinstance(koji_build_id, int):
        type_errors.append('koji_build_id must be an int. Got {}'.format(type(koji_build_id)))
    if koji_build_nvr is not None and not isinstance(koji_build_nvr, str):
        type_errors.append('koji_build_nvr must be a str. Got {}'.format(type(koji_build_nvr)))
    if type_errors:
        # NOTE(review): raises with the list itself as the exception
        # argument; kept as-is since callers/tests may rely on it.
        raise TypeError(type_errors)

    super(FetchSourcesPlugin, self).__init__(workflow)
    # Resolved koji build record; populated later during run().
    self.koji_build = None
    self.koji_build_id = koji_build_id
    self.koji_build_nvr = koji_build_nvr
    self.signing_intent = signing_intent
    self.session = get_koji_session(self.workflow.conf)
    self.pathinfo = self.workflow.conf.koji_path_info
    # Lazily-created PNC utility (see its accessor elsewhere in the class).
    self._pnc_util = None
def __init__(self, workflow, poll_interval=DEFAULT_POLL_INTERVAL,
             poll_timeout=DEFAULT_POLL_TIMEOUT):
    """
    :param workflow: DockerBuildWorkflow instance
    :param poll_interval: int, seconds between polling for Koji build
    :param poll_timeout: int, max amount of seconds to wait for Koji build
    """
    super(KojiParentPlugin, self).__init__(workflow)

    self.koji_session = get_koji_session(self.workflow.conf)
    self.poll_interval = poll_interval
    self.poll_timeout = poll_timeout

    # State resolved lazily while the plugin polls for parent builds.
    self._base_image_nvr = None
    self._base_image_build = None
    self._parent_builds = {}
    self._poll_start = None

    self.platforms = get_platforms(self.workflow.data)

    # RegistryClient instances cached by registry name
    self.registry_clients = {}

    self._deep_manifest_list_inspection = \
        self.workflow.conf.deep_manifest_list_inspection
def koji_session(self):
    """Return the koji session, creating and caching it on first access."""
    if self._koji_session:
        return self._koji_session
    self._koji_session = get_koji_session(self.workflow.conf)
    return self._koji_session
def __init__(self, workflow, smtp_host=None, from_address=None,
             send_on=(MANUAL_SUCCESS, MANUAL_FAIL), url=None,
             error_addresses=(), additional_addresses=(),
             email_domain=None,
             to_koji_submitter=False,
             to_koji_pkgowner=False,
             use_auth=None, verify_ssl=None):
    """
    constructor

    :param workflow: DockerBuildWorkflow instance
    :param send_on: list of str, list of build states when a notification should be sent
        see 'allowed_states' constant and rules in '_should_send' function
    :param url: str, URL to OSv3 instance where the build logs are stored
    :param smtp_host: str, URL of SMTP server to use to send the message (e.g. "foo.com:25")
    :param from_address: str, the "From" of the notification email
    :param error_addresses: list of str, list of email addresses where to send an email
        if an error occurred (e.g. if we can't find out who to notify about the failed build)
    :param additional_addresses: list of str, always send a message to these email addresses
    :param email_domain: str, email domain used when email addresses cannot be fetched via
        kerberos principal
    :param to_koji_submitter: bool, send a message to the koji submitter
    :param to_koji_pkgowner: bool, send messages to koji package owners
    """
    super(SendMailPlugin, self).__init__(workflow)
    self.submitter = self.DEFAULT_SUBMITTER
    self.send_on = set(send_on)

    # NOTE(review): apart from send_on, the keyword arguments above are not
    # read here -- SMTP and addressing settings come from the reactor
    # config instead. Verify before removing the unused parameters.
    self.smtp = self.workflow.conf.smtp
    self.additional_addresses = self.smtp.get('additional_addresses', ())
    self.from_address = self.smtp.get('from_address')
    self.error_addresses = self.smtp.get('error_addresses', ())
    self.email_domain = self.smtp.get('domain')
    self.to_koji_submitter = self.smtp.get('send_to_submitter', False)
    self.to_koji_pkgowner = self.smtp.get('send_to_pkg_owner', False)
    self.url = self.workflow.conf.openshift['url']

    self.koji_task_id = None
    try:
        koji_task_id = self.workflow.user_params.get('koji_task_id')
    except Exception:
        self.log.info("Failed to fetch koji task ID")
    else:
        if koji_task_id:
            self.koji_task_id = koji_task_id
            self.log.info("Koji task ID: %s", self.koji_task_id)
        else:
            self.log.info("No koji task")

    # Build id produced by the koji import plugin, if any.
    self.koji_build_id = self.workflow.data.postbuild_results.get(KojiImportPlugin.key)
    if not self.koji_build_id:
        self.log.info("Failed to fetch koji build ID")
    else:
        self.log.info("Koji build ID: %s", self.koji_build_id)

    # A koji session is optional; a connection failure only disables the
    # koji-based addressing features, it must not fail the plugin.
    self.session = None
    if self.workflow.conf.koji['hub_url']:
        try:
            self.session = get_koji_session(self.workflow.conf)
        except Exception:
            self.log.exception("Failed to connect to koji")
            self.session = None
        else:
            self.log.info("Koji connection established")
def run(self) -> Optional[List[str]]:
    """
    run the plugin

    Determine which platforms to build for: take the arches of the koji
    target's build tag (or user-specified platforms when there is no
    target), honor user overrides for scratch/isolated builds, drop
    platforms without enabled remote hosts, apply platform limits, and
    initialize the per-platform build dirs.

    :return: list of str, the final platform list (or the user-override
             list, returned early, when it differs from the koji target)
    :raises RuntimeError: when no platforms can be determined, when all
            remote hosts of a configured platform are disabled, or when
            filtering leaves nothing to build
    """
    if self.koji_target:
        koji_session = get_koji_session(self.workflow.conf)
        self.log.info("Checking koji target for platforms")
        # Pin all koji queries to one event id for a consistent snapshot.
        event_id = koji_session.getLastEvent()['id']
        target_info = koji_session.getBuildTarget(self.koji_target, event=event_id)
        build_tag = target_info['build_tag']
        koji_build_conf = koji_session.getBuildConfig(build_tag, event=event_id)
        koji_platforms = koji_build_conf['arches']
        if not koji_platforms:
            self.log.info("No platforms found in koji target")
            return None
        platforms = koji_platforms.split()
        self.log.info("Koji platforms are %s", sorted(platforms))

        if is_scratch_build(self.workflow) or is_isolated_build(self.workflow):
            override_platforms = set(get_orchestrator_platforms(self.workflow) or [])
            if override_platforms and override_platforms != set(platforms):
                sorted_platforms = sorted(override_platforms)
                self.log.info("Received user specified platforms %s", sorted_platforms)
                self.log.info("Using them instead of koji platforms")
                # platforms from user params do not match platforms from koji target
                # that almost certainly means they were overridden and should be used
                return sorted_platforms
    else:
        platforms = get_orchestrator_platforms(self.workflow)
        user_platforms = sorted(platforms) if platforms else None
        self.log.info("No koji platforms. User specified platforms are %s", user_platforms)

    if not platforms:
        raise RuntimeError("Cannot determine platforms; no koji target or platform list")

    # Filter platforms based on configured remote hosts
    remote_host_pools = self.workflow.conf.remote_hosts.get("pools", {})
    enabled_platforms = []
    defined_but_disabled = []

    def has_enabled_hosts(platform: str) -> bool:
        platform_hosts = remote_host_pools.get(platform, {})
        return any(host_info["enabled"] for host_info in platform_hosts.values())

    for p in platforms:
        if has_enabled_hosts(p):
            enabled_platforms.append(p)
        elif p in remote_host_pools:
            # Configured but every host disabled: treated as a hard error below.
            defined_but_disabled.append(p)
        else:
            self.log.warning("No remote hosts found for platform '%s' in "
                             "reactor config map, skipping", p)

    if defined_but_disabled:
        msg = 'Platforms specified in config map, but have all remote hosts disabled' \
              ' {}'.format(defined_but_disabled)
        raise RuntimeError(msg)

    final_platforms = self._limit_platforms(enabled_platforms)
    self.log.info("platforms in limits : %s", final_platforms)
    if not final_platforms:
        self.log.error("platforms in limits are empty")
        raise RuntimeError("No platforms to build for")

    self.workflow.build_dir.init_build_dirs(final_platforms, self.workflow.source)

    return final_platforms