Beispiel #1
0
def bugreport(name):
    """
    Redirect to a pre-filled bugzilla new bug page.

    :param name: package name within the currently selected collection
    Aborts with 404 when the package is unknown, blocked, or has no
    complete build to base rebuild instructions on.
    """
    # Package must have last build, so we can have rebuild instructions.
    # It doesn't need to be failing, that's up to the user to check.
    package = db.query(Package)\
                .filter(Package.name == name)\
                .filter(Package.blocked == False)\
                .filter(Package.last_complete_build_id != None)\
                .filter(Package.collection_id == g.current_collections[0].id)\
                .options(joinedload(Package.last_complete_build))\
                .first() or abort(404)
    # Set up variables that are interpolated into a template specified by configuration
    variables = package.srpm_nvra or abort(404)
    variables['package'] = package
    variables['collection'] = package.collection
    # Absolute URL of this instance, for the link back to Koschei
    external_url = frontend_config.get('external_url', request.host_url).rstrip('/')
    package_url = url_for('package_detail', name=package.name)
    variables['url'] = f'{external_url}{package_url}'
    template = get_config('bugreport.template')
    bug = {key: template[key].format(**variables) for key in template.keys()}
    bug['comment'] = dedent(bug['comment']).strip()
    query = urlencode(bug)
    bugreport_url = get_config('bugreport.url').format(query=query)
    return redirect(bugreport_url)
Beispiel #2
0
    def add_repo_to_sack(self, request, sack):
        """
        Download the user repo referenced by the request and load it into
        the given sack (with filelists).

        When 'copr.overriding_by_exclusions' is enabled, packages present
        in multiple versions are deduplicated by excluding all but the
        highest EVR of each name — presumably so user-repo packages can
        override base ones (the config name suggests this; confirm).

        :raises RequestProcessingError: when the repo cannot be downloaded
        """
        desc = self.get_user_repo_descriptor(request)
        repo_dir = os.path.join(get_config('directories.cachedir'), 'user_repos')
        repo = repo_util.get_repo(repo_dir, desc, download=True)
        if not repo:
            raise RequestProcessingError("Cannot download user repo")
        sack.load_repo(repo, load_filelists=True)
        if get_config('copr.overriding_by_exclusions', True):
            exclusions = []
            pkg_by_name = defaultdict(list)
            # Group every package in the sack by name
            for pkg in hawkey.Query(sack):
                pkg_by_name[pkg.name].append(pkg)

            def pkg_cmp(pkg1, pkg2):
                # Order packages by (epoch, version, release)
                return util.compare_evr(
                    (pkg1.epoch, pkg1.version, pkg1.release),
                    (pkg2.epoch, pkg2.version, pkg2.release),
                )

            for pkgs in pkg_by_name.values():
                if len(pkgs) > 1:
                    # TODO there are duplicated packages in base repo for unknown reason
                    # Sorted ascending by EVR; exclude everything but the last (highest)
                    exclusions += sorted(pkgs, key=cmp_to_key(pkg_cmp))[:-1]

            sack.add_excludes(exclusions)
Beispiel #3
0
    def generate_dependency_changes(self, collection, repo_id, sack, packages, brs):
        """
        Generates and persists dependency changes for given list of packages.
        Emits package state change events.

        :param collection: collection the packages belong to
        :param repo_id: Koji repo id the resolution is done against
        :param sack: sack used for dependency resolution
        :param packages: list of packages to process
        :param brs: build-requires list, one entry per package (zipped together)
        """
        # pylint:disable=too-many-locals
        results = []

        build_group = self.get_build_group(collection, repo_id)
        if build_group is None:
            raise RuntimeError(
                f"No build group found for {collection.name} at repo_id {repo_id}"
            )
        # Lazy per-package resolution; parallel_generator runs it in the
        # background with a bounded queue to cap memory use
        gen = ((package, self.resolve_dependencies(sack, br, build_group))
               for package, br in zip(packages, brs))
        queue_size = get_config('dependency.resolver_queue_size')
        gen = util.parallel_generator(gen, queue_size=queue_size)
        pkgs_done = 0
        pkgs_reported = 0
        progres_reported_at = time.time()
        for package, (resolved, curr_problems, curr_deps) in gen:
            changes = []
            if curr_deps is not None:
                prev_build = self.get_build_for_comparison(package)
                if prev_build and prev_build.dependency_keys:
                    prev_deps = self.dependency_cache.get_by_ids(
                        prev_build.dependency_keys
                    )
                    changes = self.create_dependency_changes(
                        prev_deps, curr_deps, package_id=package.id,
                    )
            results.append(ResolutionOutput(
                package=package,
                prev_resolved=package.resolved,
                resolved=resolved,
                problems=set(curr_problems),
                changes=changes,
                # last_build_id is used to detect concurrently registered builds
                last_build_id=package.last_build_id,
            ))
            # Persist in chunks to bound memory usage and transaction size
            if len(results) > get_config('dependency.persist_chunk_size'):
                self.persist_resolution_output(results)
                results = []
            pkgs_done += 1
            current_time = time.time()
            time_diff = current_time - progres_reported_at
            # Periodic progress logging, throttled by perf_report_interval
            if time_diff > get_config('dependency.perf_report_interval'):
                self.log.info(
                    "Resolution progress: resolved {} packages ({}%) ({} pkgs/min)"
                    .format(
                        pkgs_done,
                        int(pkgs_done / len(packages) * 100.0),
                        int((pkgs_done - pkgs_reported) / time_diff * 60.0)

                    )
                )
                pkgs_reported = pkgs_done
                progres_reported_at = current_time

        self.persist_resolution_output(results)
Beispiel #4
0
def secondary_koji_url(collection):
    """
    Return secondary Koji (the read-only one) web URL for given collection.
    For collections in primary mode, it returns URL of primary Koji.
    """
    config_key = (
        'secondary_koji_config.weburl' if collection.secondary_mode
        else 'koji_config.weburl'
    )
    return get_config(config_key)
Beispiel #5
0
def process_fedmsg(session, topic, msg):
    """Handle a copr fedmsg: refresh the state of the matching rebuild, if any."""
    if topic != get_config('copr.fedmsg_topic'):
        return
    if msg['msg']['user'] != get_config('copr.copr_owner'):
        return
    rebuild = session.db.query(CoprRebuild)\
        .filter(CoprRebuild.copr_build_id == int(msg['msg']['build']))\
        .first()
    if rebuild:
        refresh_build_state(session, rebuild)
Beispiel #6
0
    def __init__(self):
        # Directory where cached repodata lives on disk
        self._repos_dir = os.path.join(get_config('directories.cachedir'), 'repodata')
        # Maximum number of repos kept in the cache
        self._capacity = get_config('dependency.cache_l2_capacity')
        # Maps repo descriptor -> on-disk path; insertion order is kept so
        # older entries can be evicted first (see _ensure_capacity — confirm)
        self._repos = OrderedDict()

        # Re-populate the in-memory index from repos already on disk
        for repo_descriptor, repo_path in sorted(self._read_cache()):
            self._ensure_capacity()
            self._repos[repo_descriptor] = repo_path
        log.debug('Added {} repos from disk'.format(len(self._repos)))
Beispiel #7
0
 def __init__(self):
     # Dedicated logger for the repo cache
     self.log = logging.getLogger('koschei.repo_cache.RepoCache')
     self.cachedir = os.path.join(get_config('directories.cachedir'), 'repodata')
     super(RepoCache, self).__init__(
         cachedir=self.cachedir,
         capacity=get_config('dependency.cache_l2_capacity'),
         log=self.log,
     )
     # NOTE(review): presumably repos currently in use and thus exempt
     # from eviction — confirm against the base class
     self.locked = []
Beispiel #8
0
 def get_time_priority_query(self):
     """
     Build a query yielding a per-package time-based priority.

     Priority grows logarithmically with hours since the package's last
     build: the a*log(t)+b interpolation is 0 at t0 hours and reaches
     build_threshold at t1 hours (assuming SQL log() is base 10 — confirm
     for the target database), clamped below at -30.
     """
     t0 = get_config('priorities.t0')
     t1 = get_config('priorities.t1')
     a = get_config('priorities.build_threshold') / (math.log10(t1) - math.log10(t0))
     b = -a * math.log10(t0)
     # Guard against log(0) when the last build started just now
     log_arg = func.greatest(0.000001, hours_since(func.max(Build.started)))
     time_expr = func.greatest(a * func.log(log_arg) + b, -30)
     return self.db.query(Build.package_id.label('pkg_id'),
                          time_expr.label('priority'))\
                   .group_by(Build.package_id)
Beispiel #9
0
    def main(self):
        """
        Single scheduler pass: pick the first schedulable package above
        the build threshold and submit a build for it, unless Koji is
        overloaded or too many builds are already incomplete.
        """
        prioritized = self.get_priorities()
        self.db.rollback()  # no-op, ends the transaction
        # Persist computed priorities at most once per calculation interval
        if (time.time() - self.calculation_timestamp
                > get_config('priorities.calculation_interval')):
            self.persist_priorities(prioritized)
        incomplete_builds = self.get_incomplete_builds_query().count()
        if incomplete_builds >= get_config('koji_config.max_builds'):
            self.log.debug("Not scheduling: {} incomplete builds"
                           .format(incomplete_builds))
            return
        koji_load = koji_util.get_koji_load(self.koji_sessions['primary'])
        if koji_load > get_config('koji_config.load_threshold'):
            self.log.debug("Not scheduling: {} koji load"
                           .format(koji_load))
            return

        # presumably ordered by descending priority (early return below
        # relies on it) — confirm against get_priorities
        for package_id, priority in prioritized:
            if priority < get_config('priorities.build_threshold'):
                self.log.debug("Not scheduling: no package above threshold")
                return
            package = self.db.query(Package).get(package_id)
            if package.collection.is_buildroot_broken():
                self.log.debug("Skipping {}: {} buildroot broken"
                               .format(package, package.collection))
                continue
            newer_build = self.backend.get_newer_build_if_exists(package)
            if newer_build:
                self.log.debug("Skipping {} due to real build"
                               .format(package))
                continue

            # a package was chosen
            self.log.info('Scheduling build for {}, priority {}'
                          .format(package.name, priority))
            build = self.backend.submit_build(package)
            # Reset priority bookkeeping regardless of submission outcome
            package.current_priority = None
            package.scheduler_skip_reason = None
            package.manual_priority = 0

            if not build:
                self.log.debug("No SRPM found for {}".format(package.name))
                package.scheduler_skip_reason = Package.SKIPPED_NO_SRPM
                self.db.commit()
                continue

            self.db.commit()
            break
Beispiel #10
0
def copr_cleanup(session, older_than):
    """
    Delete copr rebuild requests older than the given number of months,
    together with their copr projects and cache directories.

    :param older_than: age threshold in months; interpolated into SQL,
                       so it must come from a trusted source (config)
    """
    session.log.debug('Cleaning up old copr projects')
    interval = "now() - '{} month'::interval".format(older_than)
    to_delete_ids = session.db.query(CoprRebuildRequest.id)\
        .filter(CoprRebuildRequest.timestamp < literal_column(interval))\
        .all_flat()
    if to_delete_ids:
        # Only rebuilds that actually got a copr build need project deletion
        rebuilds = session.db.query(CoprRebuild)\
            .filter(CoprRebuild.request_id.in_(to_delete_ids))\
            .filter(CoprRebuild.copr_build_id != None)\
            .all()
        for rebuild in rebuilds:
            try:
                copr_client.delete_project(
                    username=get_config('copr.copr_owner'),
                    projectname=rebuild.copr_name,
                )
            except CoprException as e:
                # "does not exist" means it is already gone; any other
                # failure keeps the request around so deletion is retried
                # on the next cleanup run
                if 'does not exist' not in str(e):
                    session.log.warn("Cannot delete copr project {}: {}"
                                     .format(rebuild.copr_name, e))
                    if rebuild.request_id in to_delete_ids:
                        to_delete_ids.remove(rebuild.request_id)
    if to_delete_ids:
        session.db.query(CoprRebuildRequest)\
            .filter(CoprRebuildRequest.id.in_(to_delete_ids))\
            .delete()
        # Remove cached repo data for the deleted requests
        for request_id in to_delete_ids:
            shutil.rmtree(get_request_cachedir(request_id), ignore_errors=True)
        session.log_user_action(
            "Cleanup: Deleted {} copr requests"
            .format(len(to_delete_ids))
        )
Beispiel #11
0
def get_cache():
    """Return the shared dogpile cache region, creating and configuring it
    on first use."""
    global __cache
    if not __cache:
        __cache = dogpile.cache.make_region()
        __cache.configure(**get_config('pkgdb.cache'))
    return __cache
Beispiel #12
0
def itercall(koji_session, args, koji_call, chunk_size=None):
    """
    Generator wrapper around Koji multicalls that avoids timeouts on large
    requests by splitting the arguments into chunks — one multicall per chunk.

    Usage:
    ```
    for task_info in itercall(koji_session, [1, 2, 3], lambda k, t: k.getTaskInfo(t)):
        print(task_info['id'])
    ```

    :param koji_session: Koji session on which the multicalls are made
    :param args: list of arguments, each passed to `koji_call` in turn
    :param koji_call: callable invoked as koji_call(koji_session, arg);
                      it should perform exactly one Koji method call
    :param chunk_size: number of args per multicall; taken from
                       configuration when not given
    :return: generator of per-argument results (None for fault results)
    """
    if not chunk_size:
        chunk_size = get_config('koji_config.multicall_chunk_size')
    remaining = args
    while remaining:
        chunk = remaining[:chunk_size]
        remaining = remaining[chunk_size:]
        koji_session.multicall = True
        for arg in chunk:
            koji_call(koji_session, arg)
        # Successful results are 1-element lists; faults are dicts
        for result in koji_session.multiCall():
            yield result[0] if len(result) == 1 else None
Beispiel #13
0
def refresh_build_state(session, build):
    """
    Poll Copr for the state of the given rebuild and store it.
    When all rebuilds of the parent request have finished, the request
    itself is marked 'finished'.
    """
    copr_build = copr_client.get_build_details(build.copr_build_id).data
    state = copr_build['chroots'][get_config('copr.chroot_name')]
    # Map of "finished" states of Copr builds
    # For reference, see function Build.finished()
    # at frontend/coprs_frontend/coprs/models.py (in Copr sources)
    state_map = {
        'succeeded': Build.COMPLETE,
        'failed': Build.FAILED,
        # Other states that should never be reachable:
        #  forked - Koschei does not use fork feature of Copr
        #  canceled - Koschei never cancels Copr builds
        #  skipped - Koschei always builds in empty Coprs
    }
    if state in state_map:
        build.state = state_map[state]
        session.log.info("Setting copr build {} to {}"
                         .format(build.copr_build_id, state))
        # Count sibling rebuilds still unfinished; '== None' is SQLAlchemy
        # expression syntax, not a Python identity test
        if session.db.query(CoprRebuild)\
                .filter_by(request_id=build.request_id)\
                .filter(CoprRebuild.state.notin_(Build.FINISHED_STATES) |
                        (CoprRebuild.state == None))\
                .count() == 0:
            build.request.state = 'finished'
    session.db.commit()
Beispiel #14
0
 def __init__(self, session):
     """
     :param session: Koschei session object providing db access and logging
     """
     self.session = session
     self.db = session.db
     # Logger named after the concrete subclass ('module.ClassName');
     # note it is also assigned back onto the session
     self.log = session.log = logging.getLogger(
         '{}.{}'.format(type(self).__module__, type(self).__name__),
     )
     # Per-service configuration section, keyed by the service's name
     self.service_config = get_config('services').get(self.get_name(), {})
Beispiel #15
0
 def consume(self, topic, msg):
     """
     Dispatch one fedmsg to the appropriate handler.

     Only messages for the configured Koji instance are processed:
     task state changes update build state; tag messages register
     real builds.
     """
     content = msg['msg']
     if content.get('instance') == get_config('fedmsg.instance'):
         self.log.debug('consuming ' + topic)
         if topic == self.get_topic('task.state.change'):
             self.update_build_state(content)
         elif topic == self.get_topic('tag'):
             self.register_real_build(content)
Beispiel #16
0
def emit_package_state_update(package, prev_state, new_state):
    """Publish a fedmsg notification about a package state change.
    Does nothing when the state did not actually change."""
    if prev_state == new_state:
        return
    msg_body = {
        'name': package.name,
        'old': prev_state,
        'new': new_state,
        'koji_instance': get_config('fedmsg.instance'),
        # compat
        'repo': package.collection.target_tag,
        'collection': package.collection.name,
        'collection_name': package.collection.display_name,
        'groups': [group.full_name for group in package.groups],
    }
    message = {
        'topic': 'package.state.change',
        'modname': get_config('fedmsg-publisher.modname'),
        'msg': msg_body,
    }
    log.info('Publishing fedmsg:\n' + str(message))
    fedmsg.publish(**message)
Beispiel #17
0
def query_pagure(session, url):
    """Query the Pagure API; return the parsed JSON, or None on failure."""
    response = requests.get(get_config('pagure.api_url') + '/' + url)
    if response.ok:
        return response.json()
    session.log.info("pagure query failed %s, status=%d", url,
                     response.status_code)
    return None
Beispiel #18
0
def query_pagure(session, url):
    """Perform a GET against the Pagure API and return the decoded JSON
    body, or None when the request did not succeed."""
    full_url = '/'.join([get_config('pagure.api_url'), url])
    response = requests.get(full_url)
    if not response.ok:
        session.log.info("pagure query failed %s, status=%d",
                         url, response.status_code)
        return None
    return response.json()
Beispiel #19
0
def query_pkgdb(url):
    """GET a pkgdb API endpoint; return parsed JSON, or None on non-200."""
    response = requests.get(get_config('pkgdb.pkgdb_url') + '/' + url)
    if response.status_code == 200:
        return response.json()
    log.info("pkgdb query failed %s, status=%d",
             url, response.status_code)
    return None
Beispiel #20
0
def bugreport(name):
    """
    Redirect to a pre-filled bugzilla new bug page for the given package.
    Aborts with 404 when the package is unknown, blocked, or has no
    complete build.
    """
    package = db.query(Package)\
                .filter(Package.name == name)\
                .filter(Package.blocked == False)\
                .filter(Package.last_complete_build_id != None)\
                .options(joinedload(Package.last_complete_build))\
                .first() or abort(404)
    # srpm_nvra entries are interpolated into the configured bug template
    variables = package.srpm_nvra or abort(404)
    variables['package'] = package
    variables['collection'] = package.collection
    variables['url'] = request.url_root + url_for('package_detail', name=package.name)
    template = get_config('bugreport.template')
    bug = {key: template[key].format(**variables) for key in template.keys()}
    bug['comment'] = dedent(bug['comment']).strip()
    # NOTE(review): urllib.urlencode is the Python 2 API; on Python 3 this
    # would need urllib.parse.urlencode — confirm the target interpreter
    query = urllib.urlencode(bug)
    bugreport_url = get_config('bugreport.url').format(query=query)
    return redirect(bugreport_url)
Beispiel #21
0
def main():
    """
    Entry point: prepare the repodata work directory, collect module
    component excludes from configuration, load the repo sack and run
    the generation over it.
    """
    if not os.path.exists('/tmp/maven-modulemd-gen/repodata'):
        os.makedirs('/tmp/maven-modulemd-gen/repodata')
    # Expand configured module coordinates into concrete component
    # excludes (extends the module-level 'excludes' list)
    for module_coords in config.get_config('module_excludes', []):
        excludes.extend(get_module_components(*module_coords))
    log.info('Loading sack...')
    with repo_cache.RepoCache().get_sack(repo_descriptor) as sack:
        work(sack)
Beispiel #22
0
 def url(self):
     """Return the download URL of this repo on the configured Koji."""
     return '{topurl}/repos/{build_tag}/{repo_id}/{arch}'.format(
         topurl=get_koji_config(self.koji_id, 'topurl'),
         build_tag=self.build_tag,
         repo_id=self.repo_id,
         arch=get_config('dependency.repo_arch'),
     )
Beispiel #23
0
def koji_build_to_osci_build(koji_build):
    """Convert a Koji build-info dict into an OSCI build artifact dict."""
    return {
        'type': get_config('osci.build_artifact_type'),
        'id': koji_build['id'],
        'issuer': koji_build['owner_name'],
        'component': koji_build['package_name'],
        'nvr': koji_build['nvr'],
        'scratch': False,
    }
Beispiel #24
0
def publish_fedmsg(session, message):
    """Publish the given message dict via fedmsg, if the publisher is
    enabled in configuration."""
    if not get_config('fedmsg-publisher.enabled', False):
        return
    fedmsg_message = fedmsg.Message(
        topic='{modname}.{topic}'.format(**message),
        body=message['msg'],
    )
    session.log.info('Publishing fedmsg:\n' + str(fedmsg_message))
    fedmsg.publish(fedmsg_message)
Beispiel #25
0
def refresh_monitored_packages(backend):
    """Sync the set of tracked packages from pkgdb, skipping the cycle on
    network errors."""
    try:
        if not get_config('pkgdb.sync_tracked'):
            return
        log.debug('Polling monitored packages...')
        packages = query_monitored_packages()
        if packages is None:
            return
        backend.sync_tracked(packages)
    except requests.ConnectionError:
        log.exception("Polling monitored packages failed, skipping cycle")
Beispiel #26
0
def refresh_monitored_packages(backend):
    """Poll pkgdb for the monitored package set and sync it into the
    backend, when tracking sync is enabled."""
    try:
        sync_enabled = get_config('pkgdb.sync_tracked')
        if sync_enabled:
            log.debug('Polling monitored packages...')
            tracked = query_monitored_packages()
            if tracked is not None:
                backend.sync_tracked(tracked)
    except requests.ConnectionError:
        log.exception("Polling monitored packages failed, skipping cycle")
Beispiel #27
0
def bugreport(name):
    """
    Redirect to a pre-filled bugzilla new bug page for the given package.
    Aborts with 404 when the package is unknown, blocked, or has no
    complete build.
    """
    package = db.query(Package)\
                .filter(Package.name == name)\
                .filter(Package.blocked == False)\
                .filter(Package.last_complete_build_id != None)\
                .options(joinedload(Package.last_complete_build))\
                .first() or abort(404)
    # srpm_nvra entries are interpolated into the configured bug template
    variables = package.srpm_nvra or abort(404)
    variables['package'] = package
    variables['collection'] = package.collection
    variables['url'] = request.url_root + url_for('package_detail',
                                                  name=package.name)
    template = get_config('bugreport.template')
    bug = {key: template[key].format(**variables) for key in template.keys()}
    bug['comment'] = dedent(bug['comment']).strip()
    # NOTE(review): urllib.urlencode is the Python 2 API; Python 3 needs
    # urllib.parse.urlencode — confirm the target interpreter
    query = urllib.urlencode(bug)
    bugreport_url = get_config('bugreport.url').format(query=query)
    return redirect(bugreport_url)
Beispiel #28
0
def itercall(koji_session, args, koji_call):
    """
    Split `args` into chunks and run one Koji multicall per chunk,
    yielding one result per argument.

    :param koji_session: Koji session used for the multicalls
    :param args: list of arguments individually passed to `koji_call`
    :param koji_call: callable invoked as koji_call(koji_session, arg);
                      should perform exactly one Koji method call
    :return: generator of results; None for calls that returned a fault
    """
    chunk_size = get_config('koji_config.multicall_chunk_size')
    while args:
        koji_session.multicall = True
        for arg in args[:chunk_size]:
            koji_call(koji_session, arg)
        for info in koji_session.multiCall():
            # Successful results are 1-element lists; fault results are
            # dicts. The previous `for [info] in ...` unpacking crashed on
            # faults — yield None instead, matching the documented variant
            # of this helper.
            if len(info) == 1:
                yield info[0]
            else:
                yield None
        args = args[chunk_size:]
Beispiel #29
0
def load_plugins(endpoint, only=None):
    """
    Import plugin modules for the given endpoint.

    :param endpoint: subpackage name whose 'plugins' directory is searched
    :param only: optional list of plugin names; defaults to the configured
                 'plugins' list
    Loaded modules are memoized in the module-level 'loaded' dict, so
    repeated calls for the same endpoint do nothing.
    """
    if endpoint not in loaded:
        loaded[endpoint] = {}
        plugin_dir = os.path.join(os.path.dirname(__file__), endpoint,
                                  'plugins')
        for name in only if only is not None else get_config('plugins'):
            # NOTE(review): 'imp' is deprecated since Python 3.4 in favor
            # of importlib — consider migrating
            descriptor = imp.find_module(name, [plugin_dir])
            log.info('Loading %s plugin', name)
            loaded[endpoint][name] = imp.load_module(name, *descriptor)
Beispiel #30
0
 def url(self):
     """
     Produce URL where the repo can be downloaded.
     """
     repo_arch = get_config('dependency.repo_arch')
     base = get_koji_config(self.koji_id, 'topurl')
     template = '{topurl}/repos/{build_tag}/{repo_id}/{arch}'
     return template.format(topurl=base, build_tag=self.build_tag,
                            repo_id=self.repo_id, arch=repo_arch)
Beispiel #31
0
 def get_dependency_priority_query(self):
     """
     Build a query yielding per-package dependency priority: the
     package_update weight divided by the dependency distance
     (defaulting to 8 when the distance is NULL).
     """
     update_weight = get_config('priorities.package_update')
     # pylint: disable=E1120
     distance = coalesce(UnappliedChange.distance, 8)
     # inner join with package last build to get rid of outdated dependency changes
     return self.db.query(UnappliedChange.package_id.label('pkg_id'),
                          (update_weight / distance)
                          .label('priority'))\
                   .join(Package,
                         Package.last_build_id == UnappliedChange.prev_build_id)
Beispiel #32
0
 def __init__(self, koji_id='primary', anonymous=True):
     """
     :param koji_id: either 'primary' or 'secondary'
     :param anonymous: whether to skip authentication
     """
     self.koji_id = koji_id
     config_key = 'koji_config' if koji_id == 'primary' else 'secondary_koji_config'
     self.config = get_config(config_key)
     self.__anonymous = anonymous
     self.__proxied = self.__new_session()
Beispiel #33
0
 def main(self):
     """
     Consume fedmsg messages forever: messages under the configured
     koschei topic go to consume(); every message is also dispatched to
     plugins. The db transaction is rolled back after each message and
     memory usage is checked.
     """
     for _, _, topic, msg in fedmsg.tail_messages():
         self.notify_watchdog()
         try:
             if topic.startswith(get_config('fedmsg.topic') + '.'):
                 self.consume(topic, msg)
             plugin.dispatch_event('fedmsg_event', self.session, topic, msg)
         finally:
             self.db.rollback()
         self.memory_check()
Beispiel #34
0
 def notify_watchdog(self):
     """
     Notify watchdog (if enabled) that the process is not stuck.
     """
     watchdog_enabled = get_config('services.{}.watchdog'.format(self.get_name()), None)
     if not watchdog_enabled:
         return
     path = os.environ.get('WATCHDOG_PATH', None)
     if not path:
         raise RuntimeError("WATCHDOG_PATH not set")
     # Touching the file is enough to signal liveness
     with open(path, 'w'):
         pass
Beispiel #35
0
 def get_dependency_priority_query(self):
     """
     Query producing (pkg_id, priority) rows where priority is the
     configured package_update weight divided by dependency distance
     (8 when the stored distance is NULL).
     """
     update_weight = get_config('priorities.package_update')
     # pylint: disable=E1120
     distance = coalesce(UnappliedChange.distance, 8)
     # inner join with package last build to get rid of outdated dependency changes
     return self.db.query(UnappliedChange.package_id.label('pkg_id'),
                          (update_weight / distance)
                          .label('priority'))\
                   .join(Package,
                         Package.last_build_id == UnappliedChange.prev_build_id)
Beispiel #36
0
def koji_scratch_build(session, target_tag, name, source, build_opts):
    """
    Submit a Koji scratch build and return its task id.

    :param session: Koji session used to submit the build
    :param target_tag: Koji build target
    :param name: package name (used for logging only)
    :param source: build source (e.g. SRPM path/URL) passed to Koji
    :param build_opts: build options, normalized via prepare_build_opts
    :return: Koji task id of the submitted build
    """
    build_opts = prepare_build_opts(build_opts)
    # The original message used a backslash continuation *inside* the string
    # literal, which embedded a long run of spaces into the logged text, and
    # misspelled "Initiating" — fixed via implicit string concatenation.
    logging.info(
        'Initiating koji build for %(name)s:\n\tsource=%(source)s'
        '\n\ttarget=%(target)s\n\tbuild_opts=%(build_opts)s',
        dict(name=name, target=target_tag, source=source,
             build_opts=build_opts),
    )
    task_id = session.build(source, target_tag, build_opts,
                            priority=get_config('koji_config.task_priority'))
    logging.info('Submitted koji scratch build for %s, task_id=%d', name, task_id)
    return task_id
Beispiel #37
0
 def __init__(self, log=None, db=None, koji_sessions=None,
              repo_cache=None, backend=None):
     """
     :param log: logger passed to the base service
     :param db: database session passed to the base service
     :param koji_sessions: Koji session mapping passed to the base service
     :param repo_cache: RepoCache instance; a fresh one is created when None
     :param backend: Backend instance; a fresh one is created when None
     """
     super(Resolver, self).__init__(log=log, db=db,
                                    koji_sessions=koji_sessions)
     self.repo_cache = repo_cache or RepoCache()
     self.backend = backend or Backend(koji_sessions=self.koji_sessions,
                                       log=self.log, db=self.db)
     # Cached build groups, populated lazily elsewhere — TODO confirm keying
     self.build_groups = {}
     capacity = get_config('dependency.dependency_cache_capacity')
     self.dependency_cache = DependencyCache(capacity=capacity)
Beispiel #38
0
def collection_has_schedulable_package(db, collection):
    """
    Return whether the collection contains at least one package whose
    current priority reaches the build threshold (result is 0 or 1 due
    to the limit).
    """
    priority_threshold = get_config('priorities.build_threshold')
    priority_expr = Package.current_priority_expression(Collection, Build)
    # Note: '!= None' (not 'is not None') is required here — these are
    # SQLAlchemy expressions, not plain Python values
    return db.query(Package) \
        .join(Package.collection) \
        .join(Package.last_build) \
        .filter(Package.collection_id == collection.id) \
        .filter(priority_expr != None) \
        .filter(priority_expr >= priority_threshold) \
        .limit(1).count()
Beispiel #39
0
def emit_package_state_update(session, package, prev_state, new_state):
    """Publish a package.state.change fedmsg for the package; no-op when
    the state did not actually change."""
    if prev_state == new_state:
        return
    body = {
        'name': package.name,
        'old': prev_state,
        'new': new_state,
        'koji_instance': get_config('fedmsg.instance'),
        'repo': package.collection.name,  # compat only, same as collection
        'collection': package.collection.name,
        'collection_name': package.collection.display_name,
        'groups': [group.full_name for group in package.groups],
    }
    message = {
        'topic': 'package.state.change',
        'modname': get_config('fedmsg-publisher.modname'),
        'msg': body,
    }
    publish_fedmsg(session, message)
Beispiel #40
0
 def __init__(self, koji_id='primary', anonymous=True):
     """
     :param koji_id: either 'primary' or 'secondary'
     :param anonymous: whether to skip authentication
     """
     self.koji_id = koji_id
     if koji_id == 'primary':
         self.config = get_config('koji_config')
     else:
         self.config = get_config('secondary_koji_config')
     self.__anonymous = anonymous
     self.__proxied = self.__new_session()
Beispiel #41
0
def get_srpm_arches(koji_session, all_arches, nvra, arch_override=None,
                    build_arches=None):
    """
    Compute architectures that should be used for a build. Computation is based on the one
    in Koji (kojid/getArchList).

    :param koji_session: Koji session to be used for the query
    :param all_arches: List of all arches obtained from `get_koji_arches`
    :param nvra: NVRA dict of the SRPM
    :param arch_override: User specified arch override
    :param build_arches: List of allowed arches for building. Taken from config by default
    :return: Set of architectures that can be passed to `koji_scratch_build`. May be
             empty, in which case no build should be submitted.
    """
    archlist = all_arches
    tag_archlist = {koji.canonArch(a) for a in archlist}
    headers = koji_session.getRPMHeaders(
        rpmID=nvra,
        headers=['BUILDARCHS', 'EXCLUDEARCH', 'EXCLUSIVEARCH'],
    )
    # No headers means the SRPM could not be found/read
    if not headers:
        return None
    buildarchs = headers.get('BUILDARCHS', [])
    exclusivearch = headers.get('EXCLUSIVEARCH', [])
    excludearch = headers.get('EXCLUDEARCH', [])
    # Spec-level arch constraints: BuildArch, ExclusiveArch, ExcludeArch
    if buildarchs:
        archlist = buildarchs
    if exclusivearch:
        archlist = [arch for arch in archlist if arch in exclusivearch]
    if excludearch:
        archlist = [arch for arch in archlist if arch not in excludearch]

    # noarch is only kept if the spec explicitly allows it
    if ('noarch' not in excludearch and
            ('noarch' in buildarchs or 'noarch' in exclusivearch)):
        archlist.append('noarch')

    if arch_override:
        # we also allow inverse overrides
        if arch_override.startswith('^'):
            excluded = {koji.canonArch(arch) for arch in arch_override[1:].split()}
            archlist = [arch for arch in archlist if koji.canonArch(arch) not in excluded]
        else:
            archlist = arch_override.split()

    if not build_arches:
        build_arches = get_config('koji_config').get('build_arches')
    build_arches = {koji.canonArch(arch) for arch in build_arches}
    # Only arches both present in the tag and allowed for building remain
    allowed_arches = tag_archlist & build_arches

    arches = set()
    for arch in archlist:
        if arch == 'noarch' or koji.canonArch(arch) in allowed_arches:
            arches.add(arch)

    return arches
Beispiel #42
0
 def url(self):
     """
     Produce URL where the repo can be downloaded.
     """
     parts = {
         'topurl': get_koji_config(self.koji_id, 'topurl'),
         'build_tag': self.build_tag,
         'repo_id': self.repo_id,
         'arch': get_config('dependency.repo_arch'),
     }
     return '{topurl}/repos/{build_tag}/{repo_id}/{arch}'.format(**parts)
Beispiel #43
0
def get_request_cachedir(request_or_id):
    """Return the cache directory path for a copr rebuild request; accepts
    either a CoprRebuildRequest object or a plain request id."""
    if isinstance(request_or_id, CoprRebuildRequest):
        request_id = request_or_id.id
    else:
        request_id = request_or_id
    subdir = 'copr-request-{}'.format(request_id)
    return os.path.join(get_config('directories.cachedir'), 'user_repos', subdir)
Beispiel #44
0
def get_srpm_arches(koji_session,
                    all_arches,
                    nvra,
                    arch_override=None,
                    build_arches=None):
    """
    Compute the set of architectures a build of the given SRPM should use.

    :param koji_session: Koji session used to read the SRPM headers
    :param all_arches: list of all known arches
    :param nvra: NVRA dict identifying the SRPM
    :param arch_override: optional user override; '^'-prefixed means inverse
    :param build_arches: allowed build arches; taken from config when None
    :return: set of arch names (possibly empty), or None when the SRPM
             headers cannot be read
    """
    # compute arches the same way as koji
    # see kojid/getArchList
    archlist = all_arches
    tag_archlist = {koji.canonArch(a) for a in archlist}
    headers = koji_session.getRPMHeaders(
        rpmID=nvra,
        headers=['BUILDARCHS', 'EXCLUDEARCH', 'EXCLUSIVEARCH'],
    )
    if not headers:
        return None
    buildarchs = headers.get('BUILDARCHS', [])
    exclusivearch = headers.get('EXCLUSIVEARCH', [])
    excludearch = headers.get('EXCLUDEARCH', [])
    # Apply spec-level constraints: BuildArch, ExclusiveArch, ExcludeArch
    if buildarchs:
        archlist = buildarchs
    if exclusivearch:
        archlist = [arch for arch in archlist if arch in exclusivearch]
    if excludearch:
        archlist = [arch for arch in archlist if arch not in excludearch]

    # noarch is kept only when the spec explicitly allows it
    if ('noarch' not in excludearch
            and ('noarch' in buildarchs or 'noarch' in exclusivearch)):
        archlist.append('noarch')

    if arch_override:
        # we also allow inverse overrides
        if arch_override.startswith('^'):
            excluded = {
                koji.canonArch(arch)
                for arch in arch_override[1:].split()
            }
            archlist = [
                arch for arch in archlist
                if koji.canonArch(arch) not in excluded
            ]
        else:
            archlist = arch_override.split()

    if not build_arches:
        build_arches = get_config('koji_config').get('build_arches')
    build_arches = {koji.canonArch(arch) for arch in build_arches}
    # Only arches both present in the tag and allowed for building remain
    allowed_arches = tag_archlist & build_arches

    arches = set()
    for arch in archlist:
        if arch == 'noarch' or koji.canonArch(arch) in allowed_arches:
            arches.add(arch)

    return arches
Beispiel #45
0
def get_request_cachedir(request_or_id):
    "Returns path to request cache directory"
    request_id = (
        request_or_id.id
        if isinstance(request_or_id, CoprRebuildRequest)
        else request_or_id
    )
    return os.path.join(
        get_config('directories.cachedir'),
        'user_repos',
        'copr-request-{}'.format(request_id),
    )
Beispiel #46
0
def get_artifact(session, repo_id, dest_tag):
    """Assemble the OSCI artifact description for a Koji repo and its tag.

    Collects the builds tagged into *dest_tag* as of the repo's creation
    event and packages them together with the repo path and a derived id.
    """
    koji_session = session.koji('primary')
    repo_info = koji_session.repoInfo(repo_id)
    tagged = koji_session.listTagged(dest_tag, repo_info['create_event'], latest=True)
    osci_builds = [koji_build_to_osci_build(build) for build in tagged]
    return {
        'type': get_config('osci.build_group_artifact_type'),
        'builds': osci_builds,
        'repository': repo_path(repo_id, repo_info['tag_name']),
        'id': artifact_id_from_builds(osci_builds),
    }
Beispiel #47
0
 def run_service(self):
     """Run this service's main loop forever, sleeping between iterations.

     The polling interval comes from the 'services.<name>' config subtree
     (default 3 seconds). The DB session is closed after every iteration,
     even when main() raises.
     """
     service_name = type(self).__name__.lower()
     cfg = get_config('services').get(service_name, {})
     sleep_seconds = cfg.get('interval', 3)
     self.log.info("{name} started".format(name=service_name))
     while True:
         try:
             self.main()
         finally:
             # always release the DB session, regardless of success
             self.db.close()
         time.sleep(sleep_seconds)
Beispiel #48
0
 def process_unresolved_build(self, build):
     """
     Mark *build* as dependency-unresolved and, when it is its package's
     most recent build, bump that package's build priority.

     A package whose last build is unresolved cannot itself be resolved
     and therefore gets no priority from its dependencies; giving it the
     newly-added priority keeps it from being starved (such packages most
     likely are new).
     """
     build.deps_resolved = False
     package = build.package
     if build.id == package.last_build_id:
         package.build_priority = get_config('priorities.newly_added')
Beispiel #49
0
def get_last_srpm(koji_session, tag, name):
    """Look up the latest SRPM of package *name* in *tag* (with inheritance).

    :return: a (srpm_info, relative_path) pair, or None when the package
             has no tagged build or its latest build has no source RPM
    """
    pathinfo = koji.PathInfo(topdir=get_config('koji_config.srpm_relative_path_root'))
    tagged = koji_session.listTagged(tag, latest=True, package=name, inherit=True)
    if not tagged:
        return None
    srpms = koji_session.listRPMs(buildID=tagged[0]['build_id'], arches='src')
    if not srpms:
        return None
    relative_path = '/'.join([pathinfo.build(tagged[0]), pathinfo.rpm(srpms[0])])
    return srpms[0], relative_path
Beispiel #50
0
 def callback(message):
     # Message-bus consumer callback. NOTE(review): `self` is captured
     # from an enclosing method not visible in this excerpt — this is a
     # closure, not a method.
     self.notify_watchdog()
     topic = message.topic
     msg = {'msg': message.body}
     try:
         # Only messages under the configured topic prefix are consumed
         # directly; plugins are dispatched for every message regardless.
         if topic.startswith(get_config('fedmsg.topic') + '.'):
             self.consume(topic, msg)
         plugin.dispatch_event('fedmsg_event', self.session, topic, msg)
     finally:
         # Discard any uncommitted DB state, even if processing raised,
         # so the next message starts from a clean session.
         self.db.rollback()
     self.memory_check()
Beispiel #51
0
def set_failed_build_priority(session, package, last_build):
    """
    Set the package's failed-build priority based on the newly registered build.

    The priority is bumped only on a transition into the failed state,
    i.e. the new build failed while the package's previous build (if any)
    did not. Repeated failures leave the priority untouched.
    """
    failed_priority_value = get_config('priorities.failed_build_priority')
    if last_build.state == Build.FAILED:
        # Previous build of the *same* package. Without the package_id
        # filter the query would return the most recent build of any
        # package, making the failed-transition check meaningless.
        prev_build = session.db.query(Build)\
            .filter(Build.package_id == package.id)\
            .filter(Build.started < last_build.started)\
            .order_by(Build.started.desc())\
            .first()
        if not prev_build or prev_build.state != Build.FAILED:
            package.build_priority = failed_priority_value
Beispiel #52
0
def prepare_build_opts(opts=None):
    """
    Prepare build options for a scratch-build.

    :param opts: Additional options to be added.
    :return: A dictionary ready to be passed to `build_opts` argument of Koji's build call
    """
    configured = get_config('koji_config').get('build_opts', {})
    merged = dict(configured)
    merged.update(opts or {})
    # scratch is always forced on, overriding anything in config or opts
    merged['scratch'] = True
    return merged
Beispiel #53
0
def get_packages_per_user(session):
    """Fetch pagure's owner-alias mapping and invert it to user -> packages.

    :return: dict mapping each user name to a list of their rpm packages;
             empty dict when the download fails (logged, not raised)
    """
    session.log.debug("Requesting pagure_owner_alias.json")
    response = requests.get(get_config('pagure.owner_alias_url'))
    if not response.ok:
        session.log.info("Failed to get pagure_owner_alias.json, status=%d",
                         response.status_code)
        return {}
    by_user = {}
    for package, owners in response.json()['rpms'].items():
        for owner in owners:
            by_user.setdefault(owner, []).append(package)
    return by_user
Beispiel #54
0
def patch_config(key, value):
    """
    Temporarily override a single configuration value.

    *key* is a dotted path into the (mutable) config dict. The previous
    value is restored when the generator is resumed or closed, so this
    function is meant to be used as a context manager — presumably the
    @contextmanager decorator is applied at the definition site, which
    is not visible in this excerpt.
    """
    node = get_config(None)
    path = key.split('.')
    # walk down to the dict that holds the leaf key
    for segment in path[:-1]:
        node = node[segment]
    leaf = path[-1]
    saved = node[leaf]
    node[leaf] = value
    try:
        yield
    finally:
        # restore the original value even if the managed block raised
        node[leaf] = saved
Beispiel #55
0
 def cache(self, cache_id):
     """Return the dogpile cache region named *cache_id*, creating it lazily.

     Each region is created at most once; its settings are read from the
     'caching.<cache_id>' configuration subtree.
     """
     if cache_id not in self._caches:
         # dogpile is imported lazily so processes that never use a
         # cache don't pay for the import.
         import dogpile.cache
         import dogpile.cache.util
         # Double-checked locking: re-test membership under the lock so
         # concurrent callers don't each build and overwrite the region.
         # pylint: disable=not-context-manager
         with _cache_creation_lock:
             if cache_id not in self._caches:
                 # keys are sha1-mangled to keep backend key length bounded
                 cache = dogpile.cache.make_region(
                     key_mangler=(lambda key: dogpile.cache.util.
                                  sha1_mangle_key(key.encode())), )
                 cache.configure(**get_config('caching.' + cache_id))
                 self._caches[cache_id] = cache
     return self._caches[cache_id]