Example #1
def clean_repo():
    """
    Clean up our mashed_dir, removing all unreferenced repositories
    """
    log.info("Starting clean_repo job")
    liverepos = []
    repos = config.get('mashed_dir')
    mash_locks = set()
    for release in Release.select():
        lock = join(repos, 'MASHING-%s' % release.id_prefix)
        mash_locks.add(lock)
        if exists(lock):
            log.info("Mash in progress.  Aborting clean_repo job")
            return
    for release in [rel.name.lower() for rel in Release.select()]:
        # TODO: keep the 2 most recent repos!
        for repo in [release + '-updates', release + '-updates-testing']:
            liverepos.append(dirname(realpath(join(repos, repo))))
    for repo in [join(repos, repo) for repo in os.listdir(repos)]:
        if 'repodata' in repo: # skip our repodata caches
            continue
        if not islink(repo) and isdir(repo):
            fullpath = realpath(repo)
            if fullpath not in liverepos:
                log.info("Removing %s" % fullpath)
                subprocess.call(['rm', '-fr', fullpath])

        # Bail out if a push started in the middle of this job
        for lock in mash_locks:
            if exists(lock):
                log.warning('Mash lock detected!  Stopping clean_repo job.')
                return

    log.info("clean_repo complete!")
Example #2
    def index(self, release=None):
        # /updates/metrics?tg_format=json API
        if request_format() == 'json':
            json = {}
            query = release and [Release.byName(release)] or Release.select()
            for release in query:
                json[release.name] = release.metrics
            return json

        try:
            if not release:
                rel = Release.select()[0]
                release = rel.name
            else:
                rel = Release.byName(release)
        except SQLObjectNotFound:
            flash("Unknown Release")
            raise redirect('/metrics')
        widgets = MetricData().get_widgets(release)
        if not widgets:
            return dict(metrics=[], title="Metrics currently unavailable")
        return dict(metrics=[
            widgets[name.__name__] for name in metrics
            if name.__name__ in widgets
        ],
                    title="%s Update Metrics" % rel.long_name)
Example #3
    def index(self, release=None):
        # /updates/metrics?tg_format=json API
        if request_format() == "json":
            json = {}
            query = release and [Release.byName(release)] or Release.select()
            for release in query:
                json[release.name] = release.metrics
            return json

        try:
            if not release:
                rel = Release.select()[0]
                release = rel.name
            else:
                rel = Release.byName(release)
        except SQLObjectNotFound:
            flash("Unknown Release")
            raise redirect("/metrics")
        widgets = MetricData().get_widgets(release)
        if not widgets:
            return dict(metrics=[], title="Metrics currently unavailable")
        return dict(
            metrics=[widgets[name.__name__] for name in metrics if name.__name__ in widgets],
            title="%s Update Metrics" % rel.long_name,
        )
Example #4
def clean_repo():
    """
    Clean up our mashed_dir, removing all unreferenced repositories
    """
    log.info("Starting clean_repo job")
    liverepos = []
    repos = config.get('mashed_dir')
    mash_locks = set()
    for release in Release.select():
        lock = join(repos, 'MASHING-%s' % release.id_prefix)
        mash_locks.add(lock)
        if exists(lock):
            log.info("Mash in progress.  Aborting clean_repo job")
            return
    for release in [rel.name.lower() for rel in Release.select()]:
        # TODO: keep the 2 most recent repos!
        for repo in [release + '-updates', release + '-updates-testing']:
            liverepos.append(dirname(realpath(join(repos, repo))))
    for repo in [join(repos, repo) for repo in os.listdir(repos)]:
        if 'repodata' in repo:  # skip our repodata caches
            continue
        if not islink(repo) and isdir(repo):
            fullpath = realpath(repo)
            if fullpath not in liverepos:
                log.info("Removing %s" % fullpath)
                subprocess.call(['rm', '-fr', fullpath])

        # Bail out if a push started in the middle of this job
        for lock in mash_locks:
            if exists(lock):
                log.warning('Mash lock detected!  Stopping clean_repo job.')
                return

    log.info("clean_repo complete!")
Example #5
    def test_epel7_tags(self):
        el7 = Release(name='EPEL-7', long_name='Fedora EPEL 7',
                      id_prefix='FEDORA-EPEL', dist_tag='epel7')
        assert el7.get_version() == 7
        assert el7.candidate_tag == 'epel7-testing-candidate'
        assert el7.testing_tag == 'epel7-testing'
        assert el7.stable_tag == 'epel7'
        assert el7.stable_repo == 'epel7'
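
The assertions above pin down how the EPEL tag names are derived from the dist_tag. Below is an illustrative sketch of a property scheme that is consistent with those assertions; the class and property bodies are assumptions for illustration only, and the real bodhi Release model may compute its tags differently (Fedora releases use dist-fcN style tags, for instance).

# Illustrative only: properties consistent with the test assertions above.
# The real bodhi Release model may derive these tags differently.
class ReleaseTagsSketch(object):
    def __init__(self, dist_tag):
        self.dist_tag = dist_tag                     # e.g. 'epel7'

    @property
    def stable_tag(self):
        return self.dist_tag                         # 'epel7'

    @property
    def testing_tag(self):
        return self.dist_tag + '-testing'            # 'epel7-testing'

    @property
    def candidate_tag(self):
        return self.testing_tag + '-candidate'       # 'epel7-testing-candidate'
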
Example #6
def get_rel():
    rel = None
    try:
        rel = Release.byName('fc7')
    except SQLObjectNotFound:
        rel = Release(name='fc7', long_name='Fedora 7', id_prefix='FEDORA',
                      dist_tag='dist-fc7')
    return rel
Example #7
    def _get_epel_release(self):
        rel = Release.select(Release.q.name == 'EL5')
        if rel.count():
            rel = rel[0]
        else:
            rel = Release(name='EL5', long_name='Fedora EPEL 5',
                          id_prefix='FEDORA-EPEL', dist_tag='dist-5E-epel')
        return rel
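
Both this helper and get_rel() in Example #6 follow the same SQLObject get-or-create pattern: look the Release up first and only construct it when the lookup comes back empty. A minimal standalone sketch of that pattern is below; it assumes a configured SQLObject connection with the Release table created, and reuses the field values from Examples #6 and #7 purely as placeholders.

# Minimal get-or-create sketch, assuming the SQLObject connection and
# Release model used throughout these examples.
def get_or_create_release():
    result = Release.select(Release.q.name == 'fc7')
    if result.count():
        return result[0]
    return Release(name='fc7', long_name='Fedora 7', id_prefix='FEDORA',
                   dist_tag='dist-fc7')
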
Example #8
    def test_epel7_tags(self):
        el7 = Release(name='EPEL-7', long_name='Fedora EPEL 7',
                      id_prefix='FEDORA-EPEL', dist_tag='epel7')
        assert el7.get_version() == 7
        assert el7.candidate_tag == 'epel7-testing-candidate'
        assert el7.testing_tag == 'epel7-testing'
        assert el7.stable_tag == 'epel7'
        assert el7.stable_repo == 'epel7'
Example #9
def clean_tables():
    from bodhi.model import Release, Package
    print "Cleaning out tables"
    Release.dropTable(ifExists=True, cascade=True)
    Package.dropTable(ifExists=True, cascade=True)
    hub.commit()
    Release.createTable(ifNotExists=True)
    Package.createTable(ifNotExists=True)
Example #10
def clean_tables():
    from bodhi.model import Release, Package
    print "Cleaning out tables"
    Release.dropTable(ifExists=True, cascade=True)
    Package.dropTable(ifExists=True, cascade=True)
    hub.commit()
    Release.createTable(ifNotExists=True)
    Package.createTable(ifNotExists=True)
Example #11
def main():
    load_config()
    print "Calculating F11 0day update metrics..."
    updates = {
        'bugfix': [],
        'security': [],
        'enhancement': [],
        'newpackage': []
    }
    date = datetime(*time.strptime('06-09-2009', '%m-%d-%Y')[:-2])
    f11 = Release.byName('F11')
    for update in PackageUpdate.select(PackageUpdate.q.releaseID == f11.id):
        for comment in update.comments:
            if comment.author == 'bodhi' and comment.timestamp < date and \
               comment.text.startswith('This update has been pushed to stable'):
                updates[update.type].append(update.title)
                break

    pprint(updates)
    print '=' * 80
    print 'F11 0day stats'
    print ' * %d security' % len(updates['security'])
    print ' * %d bugfixes' % len(updates['bugfix'])
    print ' * %d enhancements' % len(updates['enhancement'])
    print ' * %d newpackage' % len(updates['newpackage'])
Example #12
def save_db():
    ## Save each release and its metrics
    releases = []
    for release in Release.select():
        rel = {}
        for attr in ('name', 'long_name', 'id_prefix', 'dist_tag',
                     'locked', 'metrics'):
            rel[attr] = getattr(release, attr)
        releases.append(rel)

    updates = []
    all_updates = PackageUpdate.select()
    progress = ProgressBar(maxValue=all_updates.count())

    for update in all_updates:
        data = {}
        data['title'] = update.title
        data['builds'] = [(build.package.name, build.nvr) for build in update.builds]
        data['date_submitted'] = update.date_submitted
        data['date_pushed'] = update.date_pushed
        data['date_modified'] = update.date_modified
        data['release'] = [update.release.name, update.release.long_name,
                           update.release.id_prefix, update.release.dist_tag]
        data['submitter'] = update.submitter
        data['update_id'] = hasattr(update, 'update_id') and update.update_id or update.updateid
        data['type'] = update.type
        data['karma'] = update.karma
        data['cves'] = [cve.cve_id for cve in update.cves]
        data['bugs'] = []
        for bug in update.bugs:
            data['bugs'].append([bug.bz_id, bug.title, bug.security])
            if hasattr(bug, 'parent'):
                data['bugs'][-1].append(bug.parent)
            else:
                data['bugs'][-1].append(False)
        data['status'] = update.status
        data['pushed'] = update.pushed
        data['notes'] = update.notes
        data['request'] = update.request
        data['comments'] = [(c.timestamp, c.author, c.text, c.karma, c.anonymous) for c in update.comments]
        if hasattr(update, 'approved'):
            data['approved'] = update.approved
        else:
            data['approved'] = None

        updates.append(data)
        progress()

    # Save all buildroot overrides
    overrides = []
    for override in BuildRootOverride.select():
        try:
            overrides.append(override.__json__())
        except:
            print("Removing stray override: %s" % override)
            override.destroySelf()

    dump = file('bodhi-pickledb-%s' % time.strftime("%y%m%d.%H%M"), 'w')
    pickle.dump({'updates': updates, 'releases': releases, 'overrides': overrides}, dump)
    dump.close()
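
For reference, the dump written above can be read back with the standard pickle module. A minimal loading sketch follows; the filename is a placeholder for whatever bodhi-pickledb-* file save_db() produced.

# Minimal sketch for reading a dump produced by save_db() above.
import pickle

dump = file('bodhi-pickledb-130101.0000', 'r')   # placeholder filename
db = pickle.load(dump)
dump.close()
print len(db['updates']), 'updates'
print len(db['releases']), 'releases'
print len(db['overrides']), 'overrides'
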
Example #13
    def list(self,
             build=None,
             tg_errors=None,
             mine=False,
             release=None,
             show_expired=False,
             **kw):
        query = []
        title = '%d Buildroot Overrides'
        if mine:
            query.append(
                BuildRootOverride.q.submitter == identity.current.user_name)
            title += ' submitted by %s' % identity.current.user_name
        if release:
            rel = Release.byName(release)
            query.append(BuildRootOverride.q.releaseID == rel.id)
            title += ' for %s' % rel.long_name
        if not show_expired:
            query.append(BuildRootOverride.q.date_expired == None)

        overrides = BuildRootOverride.select(AND(*query))

        if request_format() == 'json':
            overrides = [o.__json__() for o in overrides]
            num_items = len(overrides)
        else:
            num_items = overrides.count()
        return dict(overrides=overrides,
                    title=title % num_items,
                    num_items=num_items,
                    show_expired=show_expired,
                    mine=mine)
Example #14
    def get_widgets(self, release):
        """ Return the metrics for a specified release.

        If our metric widgets are more than a day old, recreate them with
        fresh metrics from our database.
        """
        if self.age and get_age_in_days(self.age) < 1:
            return self.widgets[release]

        log.debug("Generating some fresh metric widgets...")
        freshwidgets = {}
        for rel in Release.select():
            if not rel.metrics:
                log.warning("No metrics found for %s" % rel.name)
                return
            self.init_metrics(rel)
            if not freshwidgets.has_key(rel.name):
                freshwidgets[rel.name] = {}
            for metric in self.metrics:
                widget = metric.get_widget(
                    rel.metrics[metric.__class__.__name__])
                if widget:
                    freshwidgets[rel.name][metric.__class__.__name__] = widget

        self.widgets = freshwidgets
        self.age = datetime.utcnow()
        return self.widgets[release]
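
A usage sketch for the method above, assuming a release name such as 'F11' already exists in the Release table and has metrics; Example #2 calls it the same way from the metrics controller.

# Minimal usage sketch; 'F11' is a placeholder release name.  The widgets
# are regenerated only when the cached copy is more than a day old.
data = MetricData()
widgets = data.get_widgets('F11')
if widgets:
    for metric_name in widgets:
        print metric_name
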
Example #15
def main():
    load_config()
    __connection__ = hub = PackageHub("bodhi")
    if len(sys.argv) != 2:
        print "Usage: %s <release>" % sys.argv[0]
        sys.exit(1)
    try:
        release = Release.byName(sys.argv[1].upper())
    except SQLObjectNotFound:
        print "Cannot find Release '%s'" % sys.argv[1]
        sys.exit(1)

    updates = PackageUpdate.select(PackageUpdate.q.releaseID == release.id)
    progress = ProgressBar(maxValue=updates.count())
    print "Destroying all updates, comments, and bugs associated with %s" % release.name

    for update in updates:
        for comment in update.comments:
            comment.destroySelf()
        for build in update.builds:
            build.destroySelf()
        for bug in update.bugs:
            if len(bug.updates) == 1:
                bug.destroySelf()
        update.destroySelf()
        progress()

    release.destroySelf()
    hub.commit()
    print
Example #16
    def get_widgets(self, release):
        """ Return the metrics for a specified release.

        If our metric widgets are more than a day old, recreate them with
        fresh metrics from our database.
        """
        if self.age and get_age_in_days(self.age) < 1:
            return self.widgets[release]

        log.debug("Generating some fresh metric widgets...")
        freshwidgets = {}
        for rel in Release.select():
            if not rel.metrics:
                log.warning("No metrics found for %s" % rel.name)
                return
            self.init_metrics(rel)
            if not freshwidgets.has_key(rel.name):
                freshwidgets[rel.name] = {}
            for metric in self.metrics:
                widget = metric.get_widget(rel.metrics[metric.__class__.__name__])
                if widget:
                    freshwidgets[rel.name][metric.__class__.__name__] = widget

        self.widgets = freshwidgets
        self.age = datetime.utcnow()
        return self.widgets[release]
Example #17
    def refresh(self):
        """ Refresh all of the metrics for all releases.

        For each release, initialize our metrics objects, and feed them every
        update for that release.  Do the necessary calculations, and then save
        our metrics to the database in the Release.metrics PickleCol.
        """
        log.info("Doing a hard refresh of our metrics data")
        metrics = {}
        updates = {}  # {release: [updates,]}
        all_updates = list(PackageUpdate.select())
        releases = list(Release.select())
        for release in releases:
            updates[release.name] = []
        for update in all_updates:
            updates[update.release.name].append(update)
        for release in releases:
            log.debug("Calculating metrics for %s" % release.name)
            self.init_metrics(release)
            for update in updates[release.name]:
                for metric in self.metrics:
                    metric.update(update)
            for metric in self.metrics:
                metric.done()
                metrics[metric.__class__.__name__] = metric.get_data()
            release.metrics = metrics
        hub.commit()
        del all_updates
        del releases
        log.info("Metrics generation complete!")
Example #18
    def list(self, build=None, tg_errors=None, mine=False, release=None, 
             show_expired=False, **kw):
        query = []
        title = '%d Buildroot Overrides'
        if mine:
            show_expired = True
            query.append(
                BuildRootOverride.q.submitter == identity.current.user_name)
            title += ' submitted by %s' % identity.current.user_name
        if release:
            rel = Release.byName(release)
            query.append(
                BuildRootOverride.q.releaseID == rel.id)
            title += ' for %s' % rel.long_name
        if not show_expired:
            query.append(
                BuildRootOverride.q.date_expired == None)

        overrides = BuildRootOverride.select(AND(*query))

        if request_format() == 'json':
            overrides = [o.__json__() for o in overrides]
            num_items = len(overrides)
        else:
            num_items = overrides.count()
        return dict(overrides=overrides,
                    title=title % num_items,
                    num_items=num_items,
                    show_expired=show_expired,
                    mine=mine)
Example #19
    def refresh(self):
        """ Refresh all of the metrics for all releases.

        For each release, initialize our metrics objects, and feed them every
        update for that release.  Do the necessary calculations, and then save
        our metrics to the database in the Release.metrics PickleCol.
        """
        log.info("Doing a hard refresh of our metrics data")
        metrics = {}
        updates = {}  # {release: [updates,]}
        all_updates = list(PackageUpdate.select())
        releases = list(Release.select())
        for release in releases:
            updates[release.name] = []
        for update in all_updates:
            updates[update.release.name].append(update)
        for release in releases:
            log.debug("Calculating metrics for %s" % release.name)
            self.init_metrics(release)
            for update in updates[release.name]:
                for metric in self.metrics:
                    metric.update(update)
            for metric in self.metrics:
                metric.done()
                metrics[metric.__class__.__name__] = metric.get_data()
            release.metrics = metrics
        hub.commit()
        del all_updates
        del releases
        log.info("Metrics generation complete!")
Example #20
    def get_security_updates(self, release):
        release = Release.select(Release.q.long_name == release)[0]
        return PackageUpdate.select(
            AND(PackageUpdate.q.releaseID == release.id,
                PackageUpdate.q.type == 'security',
                PackageUpdate.q.status == 'testing',
                PackageUpdate.q.request == None))
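
The queries in these examples rely on SQLObject's SQLBuilder: AND() combines the .q column expressions, and comparing a column to None (PackageUpdate.q.request == None) is how SQLBuilder emits an SQL IS NULL test, which is why the code deliberately avoids "is None" here. A minimal standalone query sketch in the same style, assuming the PackageUpdate model and database connection from the surrounding examples:

# Minimal SQLBuilder query sketch, assuming the PackageUpdate model and
# connection used throughout these examples.
from sqlobject.sqlbuilder import AND

pending_security = PackageUpdate.select(
    AND(PackageUpdate.q.type == 'security',
        PackageUpdate.q.status == 'testing',
        PackageUpdate.q.request == None))   # '== None' becomes SQL "IS NULL"
print pending_security.count()
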
Example #21
File: rss.py Project: tyll/bodhi
    def get_critpath_updates(self, release=None, unapproved=None):
        i = 0
        entries = []
        base = config.get('base_address')
        title = 'Latest Critical Path Updates'
        query = [PackageUpdate.q.status != 'obsolete']
        if release:
            try:
                release = Release.byName(release)
            except SQLObjectNotFound:
                return dict(title='%s release not found' % release, entries=[])
            releases = [release]
            title = title + ' for %s' % release.long_name
        else:
            releases = Release.select()
        if unapproved:
            query.append(PackageUpdate.q.status != 'stable')
        for update in PackageUpdate.select(
                AND(
                    OR(*[
                        PackageUpdate.q.releaseID == release.id
                        for release in releases
                    ]), *query),
                orderBy=PackageUpdate.q.date_submitted).reversed():

            delta = datetime.utcnow() - update.date_submitted
            if delta and delta.days > config.get('feeds.num_days_to_show'):
                if len(entries) >= config.get('feeds.max_entries'):
                    break

            if update.critpath:
                if unapproved:
                    if update.critpath_approved:
                        continue
                entries.append({
                    'id': base + url(update.get_url()),
                    'summary': update.notes,
                    'link': base + url(update.get_url()),
                    'published': update.date_submitted,
                    'updated': update.date_submitted,
                    'title': update.title,
                })
                i += 1
        return dict(title=title,
                    subtitle="",
                    link=config.get('base_address') + url('/'),
                    entries=entries)
Example #22
    def get_critpath_updates(self, release=None, unapproved=None):
        i = 0
        entries = []
        base = config.get('base_address')
        title = 'Latest Critical Path Updates'
        query = [PackageUpdate.q.status != 'obsolete']
        if release:
            try:
                release = Release.byName(release)
            except SQLObjectNotFound:
                return dict(title = '%s release not found' % release, entries=[])
            releases = [release]
            title = title + ' for %s' % release.long_name
        else:
            releases = Release.select()
        if unapproved:
            query.append(PackageUpdate.q.status != 'stable')
        for update in PackageUpdate.select(
                AND(OR(*[PackageUpdate.q.releaseID == release.id
                         for release in releases]),
                    *query),
                orderBy=PackageUpdate.q.date_submitted).reversed():

            delta = datetime.utcnow() - update.date_submitted
            if delta and delta.days > config.get('feeds.num_days_to_show'):
                if len(entries) >= config.get('feeds.max_entries'):
                    break

            if update.critpath:
                if unapproved:
                    if update.critpath_approved:
                        continue
                entries.append({
                    'id'        : base + url(update.get_url()),
                    'summary'   : update.notes,
                    'link'      : base + url(update.get_url()),
                    'published' : update.date_submitted,
                    'updated'   : update.date_submitted,
                    'title'     : update.title,
                })
                i += 1
        return dict(
                title = title,
                subtitle = "",
                link = config.get('base_address') + url('/'),
                entries = entries
        )
Example #23
def get_rel():
    rel = None
    try:
        rel = Release.byName('fc7')
    except SQLObjectNotFound:
        rel = Release(name='fc7', long_name='Fedora 7', id_prefix='FEDORA',
                      dist_tag='dist-fc7')
    return rel
Example #24
    def _get_epel_release(self):
        rel = Release.select(Release.q.name == 'EL5')
        if rel.count():
            rel = rel[0]
        else:
            rel = Release(name='EL5', long_name='Fedora EPEL 5',
                          id_prefix='FEDORA-EPEL', dist_tag='dist-5E-epel')
        return rel
Example #25
    def get_security_updates(self, release):
        release = Release.select(Release.q.long_name == release)[0]
        updates = PackageUpdate.select(
            AND(PackageUpdate.q.releaseID == release.id,
                PackageUpdate.q.type == 'security',
                PackageUpdate.q.status == 'testing',
                PackageUpdate.q.request == None))
        updates = self.sort_by_days_in_testing(updates)
        return updates
Example #26
    def get_security_updates(self, release):
        release = Release.select(Release.q.long_name == release)[0]
        updates = PackageUpdate.select(
            AND(PackageUpdate.q.releaseID == release.id,
                PackageUpdate.q.type == 'security',
                PackageUpdate.q.status == 'testing',
                PackageUpdate.q.request == None))
        updates = self.sort_by_days_in_testing(updates)
        return updates
Example #27
def save_db():
    ## Save each release and its metrics
    releases = []
    for release in Release.select():
        rel = {}
        for attr in ('name', 'long_name', 'id_prefix', 'dist_tag', 'locked',
                     'metrics'):
            rel[attr] = getattr(release, attr)
        releases.append(rel)

    updates = []
    all_updates = PackageUpdate.select()
    progress = ProgressBar(maxValue=all_updates.count())

    for update in all_updates:
        data = {}
        data['title'] = update.title
        data['builds'] = [(build.package.name, build.nvr)
                          for build in update.builds]
        data['date_submitted'] = update.date_submitted
        data['date_pushed'] = update.date_pushed
        data['date_modified'] = update.date_modified
        data['release'] = [
            update.release.name, update.release.long_name,
            update.release.id_prefix, update.release.dist_tag
        ]
        data['submitter'] = update.submitter
        data['update_id'] = hasattr(
            update, 'update_id') and update.update_id or update.updateid
        data['type'] = update.type
        data['karma'] = update.karma
        data['cves'] = [cve.cve_id for cve in update.cves]
        data['bugs'] = []
        for bug in update.bugs:
            data['bugs'].append([bug.bz_id, bug.title, bug.security])
            if hasattr(bug, 'parent'):
                data['bugs'][-1].append(bug.parent)
            else:
                data['bugs'][-1].append(False)
        data['status'] = update.status
        data['pushed'] = update.pushed
        data['notes'] = update.notes
        data['request'] = update.request
        data['comments'] = [(c.timestamp, c.author, c.text, c.karma,
                             c.anonymous) for c in update.comments]
        if hasattr(update, 'approved'):
            data['approved'] = update.approved
        else:
            data['approved'] = None

        updates.append(data)
        progress()

    dump = file('bodhi-pickledb-%s' % time.strftime("%y%m%d.%H%M"), 'w')
    pickle.dump({'updates': updates, 'releases': releases}, dump)
    dump.close()
Example #28
def save_db():
    ## Save each release and its metrics
    releases = []
    for release in Release.select():
        rel = {}
        for attr in ("name", "long_name", "id_prefix", "dist_tag", "locked", "metrics"):
            rel[attr] = getattr(release, attr)
        releases.append(rel)

    updates = []
    all_updates = PackageUpdate.select()
    progress = ProgressBar(maxValue=all_updates.count())

    for update in all_updates:
        data = {}
        data["title"] = update.title
        data["builds"] = [(build.package.name, build.nvr) for build in update.builds]
        data["date_submitted"] = update.date_submitted
        data["date_pushed"] = update.date_pushed
        data["date_modified"] = update.date_modified
        data["release"] = [
            update.release.name,
            update.release.long_name,
            update.release.id_prefix,
            update.release.dist_tag,
        ]
        data["submitter"] = update.submitter
        data["update_id"] = hasattr(update, "update_id") and update.update_id or update.updateid
        data["type"] = update.type
        data["karma"] = update.karma
        data["cves"] = [cve.cve_id for cve in update.cves]
        data["bugs"] = []
        for bug in update.bugs:
            data["bugs"].append([bug.bz_id, bug.title, bug.security])
            if hasattr(bug, "parent"):
                data["bugs"][-1].append(bug.parent)
            else:
                data["bugs"][-1].append(False)
        data["status"] = update.status
        data["pushed"] = update.pushed
        data["notes"] = update.notes
        data["request"] = update.request
        data["comments"] = [(c.timestamp, c.author, c.text, c.karma, c.anonymous) for c in update.comments]
        if hasattr(update, "approved"):
            data["approved"] = update.approved
        else:
            data["approved"] = None

        updates.append(data)
        progress()

    dump = file("bodhi-pickledb-%s" % time.strftime("%y%m%d.%H%M"), "w")
    pickle.dump({"updates": updates, "releases": releases}, dump)
    dump.close()
Example #29
def import_releases():
    """ Import the releases """
    from bodhi.model import Release
    print "\nInitializing Release table"
    progress = ProgressBar(maxValue=len(releases))

    for release in releases:
        rel = Release(name=release['name'], long_name=release['long_name'],
                      id_prefix=release['id_prefix'],
                      dist_tag=release['dist_tag'])
        progress()
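
The loop above expects a module-level releases list of dictionaries with the keys it reads. A minimal sketch of that structure, reusing the field values that appear in Examples #6 and #7 as placeholders:

# Illustrative shape of the `releases` list consumed by import_releases();
# the entries are placeholders, not an authoritative release list.
releases = [
    {'name': 'fc7', 'long_name': 'Fedora 7',
     'id_prefix': 'FEDORA', 'dist_tag': 'dist-fc7'},
    {'name': 'EL5', 'long_name': 'Fedora EPEL 5',
     'id_prefix': 'FEDORA-EPEL', 'dist_tag': 'dist-5E-epel'},
]
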
Example #30
    def get_unapproved_critpath_updates(self, release):
        release = Release.select(Release.q.long_name == release)[0]
        updates = []
        for update in PackageUpdate.select(
                AND(PackageUpdate.q.releaseID == release.id,
                    PackageUpdate.q.status != 'stable',
                    PackageUpdate.q.status != 'obsolete',
                    PackageUpdate.q.request == None),
                orderBy=PackageUpdate.q.date_submitted).reversed():
            if update.critpath and not update.critpath_approved:
                updates.append(update)
        return updates
Example #31
    def get_unapproved_critpath_updates(self, release):
        release = Release.select(Release.q.long_name == release)[0]
        updates = []
        for update in PackageUpdate.select(
                AND(PackageUpdate.q.releaseID == release.id,
                    PackageUpdate.q.status == 'testing',
                    PackageUpdate.q.request == None),
                orderBy=PackageUpdate.q.date_submitted).reversed():
            if update.critpath and not update.critpath_approved:
                updates.append(update)
        updates = self.sort_by_days_in_testing(updates)
        return updates
Example #32
    def get_unapproved_critpath_updates(self, release):
        release = Release.select(Release.q.long_name == release)[0]
        updates = []
        for update in PackageUpdate.select(
                AND(PackageUpdate.q.releaseID == release.id,
                    PackageUpdate.q.status == 'testing',
                    PackageUpdate.q.request == None),
                orderBy=PackageUpdate.q.date_submitted).reversed():
            if update.critpath and not update.critpath_approved:
                updates.append(update)
        updates = self.sort_by_days_in_testing(updates)
        return updates
Example #33
    def send_digest_mail(self):
        """
        Send digest mail to mailing lists
        """
        for prefix, content in self.testing_digest.items():
            log.debug("Sending digest for updates-testing %s" % prefix)
            maildata = u""
            try:
                security_updates = self.get_security_updates(prefix)
                if security_updates:
                    maildata += u"The following %s Security updates need testing:\n Age  URL\n" % prefix
                    for update in security_updates:
                        maildata += u" %3i  %s%s\n" % (
                            update.days_in_testing,
                            config.get("base_address"),
                            url(update.get_url()),
                        )
                    maildata += "\n\n"

                critpath_updates = self.get_unapproved_critpath_updates(prefix)
                if critpath_updates:
                    maildata += u"The following %s Critical Path updates have yet to be approved:\n Age URL\n" % prefix
                    for update in self.get_unapproved_critpath_updates(prefix):
                        maildata += u" %3i  %s%s\n" % (
                            update.days_in_testing,
                            config.get("base_address"),
                            url(update.get_url()),
                        )
                    maildata += "\n\n"
            except Exception, e:
                log.exception(e)

            maildata += u"The following builds have been pushed to %s updates-testing\n\n" % prefix
            # get a list of all NVRs
            updlist = content.keys()
            # sort the list
            updlist.sort()
            # Add the list of builds to the mail
            for pkg in updlist:
                maildata += u"    %s\n" % pkg
            # Add some space between the short list and the details
            maildata += u"\nDetails about builds:\n\n"
            # Add the detail of each build
            for nvr in updlist:
                maildata += u"\n" + self.testing_digest[prefix][nvr]
            release = Release.select(Release.q.long_name == prefix)[0]
            mail.send_mail(
                config.get("bodhi_email"),
                config.get("%s_test_announce_list" % release.id_prefix.lower().replace("-", "_")),
                "%s updates-testing report" % prefix,
                maildata,
            )
Example #34
def clean_tables():
    from bodhi.model import (Release, Package, PackageBuild, PackageUpdate,
                             Comment, CVE, Bugzilla, BuildRootOverride)
    from bodhi.identity.tables import (Visit, VisitIdentity, Group, User,
                                       Permission)

    print "Cleaning out tables"
    Release.dropTable(ifExists=True, cascade=True)
    Package.dropTable(ifExists=True, cascade=True)
    PackageBuild.dropTable(ifExists=True, cascade=True)
    PackageUpdate.dropTable(ifExists=True, cascade=True)
    Comment.dropTable(ifExists=True, cascade=True)
    CVE.dropTable(ifExists=True, cascade=True)
    Bugzilla.dropTable(ifExists=True, cascade=True)
    BuildRootOverride.dropTable(ifExists=True, cascade=True)

    Visit.dropTable(ifExists=True, cascade=True)
    VisitIdentity.dropTable(ifExists=True, cascade=True)
    Group.dropTable(ifExists=True, cascade=True)
    User.dropTable(ifExists=True, cascade=True)
    Permission.dropTable(ifExists=True, cascade=True)

    hub.commit()

    Release.createTable(ifNotExists=True)
    Package.createTable(ifNotExists=True)
    PackageBuild.createTable(ifNotExists=True)
    PackageUpdate.createTable(ifNotExists=True)
    Comment.createTable(ifNotExists=True)
    CVE.createTable(ifNotExists=True)
    Bugzilla.createTable(ifNotExists=True)
    BuildRootOverride.createTable(ifNotExists=True)

    Visit.createTable(ifNotExists=True)
    VisitIdentity.createTable(ifNotExists=True)
    Group.createTable(ifNotExists=True)
    User.createTable(ifNotExists=True)
    Permission.createTable(ifNotExists=True)
Example #35
    def masher(self):
        """ Display the current status of the Masher """
        if config.get('masher'):
            data = self._masher_request('/admin/masher')
            if not data:
                data = {'masher_str': 'Unable to contact the masher', 'tags': []}
            return dict(masher_str=data['masher_str'], tags=data['tags'])
        else:
            from bodhi.masher import masher
            tags = []
            for release in Release.select():
                tags.append(release.stable_tag)
                tags.append(release.testing_tag)
            return dict(masher_str=str(masher), tags=tags)
Example #36
    def _fetch_candidate_builds(self, pkg):
        """ Return all candidate builds for a given package """
        matches = {}
        koji = get_session()
        koji.multicall = True
        for tag in [r.candidate_tag for r in Release.select()]:
            koji.listTagged(tag, package=pkg)
        results = koji.multiCall()
        for result in results:
            for entries in result:
                for entry in entries:
                    matches[entry['nvr']] = entry['completion_time']
        return [build[0] for build in
                sorted(matches.items(), key=itemgetter(1), reverse=True)]
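
The nested loops above reflect how koji's multicall batching returns its results: setting multicall = True queues each listTagged() call instead of executing it, and multiCall() then issues them in one round trip, returning one wrapped result per queued call. A minimal sketch of the same pattern, assuming the get_session() helper from these examples; the tag and package names are placeholders:

# Minimal multicall sketch; tag and package names are placeholders.
koji = get_session()
koji.multicall = True
for tag in ('dist-f11-updates-candidate', 'dist-f12-updates-candidate'):
    koji.listTagged(tag, package='kernel')      # queued, not executed yet
results = koji.multiCall()                      # one wrapped result per call
for result in results:
    for builds in result:
        for build in builds:
            print build['nvr'], build['completion_time']
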
Example #37
    def _fetch_candidate_builds(self, pkg):
        """ Return all candidate builds for a given package """
        matches = {}
        koji = get_session()
        koji.multicall = True
        for tag in [r.candidate_tag for r in Release.select()]:
            koji.getLatestBuilds(tag, package=pkg)
        results = koji.multiCall()
        for result in results:
            for entries in result:
                for entry in entries:
                    matches[entry['nvr']] = entry['completion_time']
        return [build[0] for build in
                sorted(matches.items(), key=itemgetter(1), reverse=True)]
Example #38
    def masher(self):
        """ Display the current status of the Masher """
        if config.get('masher'):
            data = self._masher_request('/admin/masher')
            if not data:
                data = {'masher_str': 'Unable to contact the masher', 'tags': []}
            return dict(masher_str=data['masher_str'], tags=data['tags'])
        else:
            from bodhi.masher import masher
            tags = []
            for release in Release.select():
                tags.append(release.stable_tag)
                tags.append(release.testing_tag)
            return dict(masher_str=str(masher), tags=tags)
Example #39
def clean_stable_builds(untag=False):
    koji = get_session()
    for release in Release.select():
        latest_stable_builds = koji.listTagged(release.stable_tag, latest=True)
        latest_stable_nvrs = [build['nvr'] for build in latest_stable_builds]
        print "Fetched %d latest stable builds tagged with %s" % (
            len(latest_stable_builds), release.stable_tag)
        stable_builds = koji.listTagged(release.stable_tag)
        stable_nvrs = [build['nvr'] for build in stable_builds]
        print "Fetched %d stable builds tagged with %s" % (len(stable_builds),
                                                           release.stable_tag)
        for latest_build in latest_stable_builds:
            for build in stable_builds:
                if build['nvr'] == latest_build['nvr']:
                    continue
                compare_builds(latest_build, build, untag, release.stable_tag)
Example #40
def clean_stable_builds(untag=False):
    koji = get_session()
    for release in Release.select():
        latest_stable_builds = koji.listTagged(release.stable_tag, latest=True)
        latest_stable_nvrs = [build['nvr'] for build in latest_stable_builds]
        print "Fetched %d latest stable builds tagged with %s" % (
                len(latest_stable_builds), release.stable_tag)
        stable_builds = koji.listTagged(release.stable_tag)
        stable_nvrs = [build['nvr'] for build in stable_builds]
        print "Fetched %d stable builds tagged with %s" % (
                len(stable_builds), release.stable_tag)
        for latest_build in latest_stable_builds:
            for build in stable_builds:
                if build['nvr'] == latest_build['nvr']:
                    continue
                compare_builds(latest_build, build, untag, release.stable_tag)
Example #41
def main():
    unstable = subprocess.Popen(
        'grep "\[Fedora Update\] \[unstable\]" bodhi.logs',
        stdout=subprocess.PIPE,
        shell=True)
    out, err = unstable.communicate()
    (unstable_updates, unstable_critpath, unstable_deltas, unstable_accum,
     unstable_occur) = parse_output(out)

    stable = subprocess.Popen(
        'grep "\[Fedora Update\] \[stablekarma\]" bodhi.logs',
        stdout=subprocess.PIPE,
        shell=True)
    out, err = stable.communicate()
    (stable_updates, stable_critpath, stable_deltas, stable_accum,
     stable_occur) = parse_output(out)

    for release in Release.select():
        print '\n' + header(release.long_name)
        num_updates = PackageUpdate.select(
            PackageUpdate.q.releaseID == release.id).count()
        num_stable = len(stable_updates[release.name])
        num_unstable = len(unstable_updates[release.name])
        num_testing = len(unstable_deltas) + len(stable_deltas)
        print " * %d updates automatically unpushed due to karma (%0.2f%%)" % (
            num_unstable, float(num_unstable) / num_updates * 100)
        print "   * %d of which were critical path updates" % (
            unstable_critpath[release.name])
        print " * %d updates automatically pushed due to karma (%0.2f%%)" % (
            num_stable, float(num_stable) / num_updates * 100)
        print "   * %d of which were critical path updates" % (
            stable_critpath[release.name])

        print " * Time spent in testing of updates that were pushed by karma:"
        print "   * mean = %d days" % (stable_accum.days / len(stable_deltas))
        print "   * median = %d days" % stable_deltas[len(stable_deltas) /
                                                      2].days
        print "   * mode = %d days" % sorted(stable_occur.items(),
                                             key=itemgetter(1))[-1][0]

        print " * Time spent in testing of updates that were unpushed by karma:"
        print "   * mean = %d days" % (unstable_accum.days /
                                       len(unstable_deltas))
        print "   * median = %d days" % unstable_deltas[len(unstable_deltas) /
                                                        2].days
        print "   * mode = %d days" % sorted(unstable_occur.items(),
                                             key=itemgetter(1))[-1][0]
Example #42
    def send_digest_mail(self):
        '''
        Send digest mail to mailing lists
        '''
        for prefix, content in self.testing_digest.items():
            log.debug("Sending digest for updates-testing %s" % prefix)
            maildata = u''
            try:
                security_updates = self.get_security_updates(prefix)
                if security_updates:
                    maildata += u'The following %s Security updates need testing:\n Age  URL\n' % prefix
                    for update in security_updates:
                        maildata += u' %3i  %s%s\n' % (
                            update.days_in_testing, config.get('base_address'),
                            url(update.get_url()))
                    maildata += '\n\n'

                critpath_updates = self.get_unapproved_critpath_updates(prefix)
                if critpath_updates:
                    maildata += u'The following %s Critical Path updates have yet to be approved:\n Age URL\n' % prefix
                    for update in self.get_unapproved_critpath_updates(prefix):
                        maildata += u' %3i  %s%s\n' % (
                            update.days_in_testing, config.get('base_address'),
                            url(update.get_url()))
                    maildata += '\n\n'
            except Exception, e:
                log.exception(e)

            maildata += u'The following builds have been pushed to %s updates-testing\n\n' % prefix
            # get a list of all NVRs
            updlist = content.keys()
            # sort the list
            updlist.sort()
            # Add the list of builds to the mail
            for pkg in updlist:
                maildata += u'    %s\n' % pkg
            # Add some space between the short list and the details
            maildata += u'\nDetails about builds:\n\n'
            # Add the detail of each build
            for nvr in updlist:
                maildata += u"\n" + self.testing_digest[prefix][nvr]
            release = Release.select(Release.q.long_name == prefix)[0]
            mail.send_mail(
                config.get('bodhi_email'),
                config.get('%s_test_announce_list' %
                           release.id_prefix.lower().replace('-', '_')),
                '%s updates-testing report' % prefix, maildata)
Example #43
def clean_testing_builds(untag=False):
    koji = get_session()
    for release in Release.select():
        stable_builds = koji.listTagged(release.stable_tag, latest=True)
        stable_nvrs = [build["nvr"] for build in stable_builds]
        print "Fetched %d builds tagged with %s" % (len(stable_builds), release.stable_tag)
        testing_builds = koji.listTagged(release.testing_tag, latest=True)
        print "Fetched %d builds tagged with %s" % (len(testing_builds), release.testing_tag)
        testing_nvrs = [build["nvr"] for build in testing_builds]
        for testing_build in testing_builds:
            for build in testing_builds:
                compare_builds(testing_build, build, untag, release.testing_tag)
            for build in stable_builds:
                compare_builds(testing_build, build, untag, release.testing_tag)

        # Find testing updates that aren't in the list of latest builds
        for update in PackageUpdate.select(
            AND(
                PackageUpdate.q.releaseID == release.id,
                PackageUpdate.q.status == "testing",
                PackageUpdate.q.request == None,
            )
        ):
            for build in update.builds:
                if build.nvr not in testing_nvrs:
                    latest_testing = None
                    latest_stable = None
                    for testing in testing_nvrs:
                        if testing.startswith(build.package.name + "-"):
                            latest_testing = testing
                            break
                    for stable in stable_nvrs:
                        if stable.startswith(build.package.name + "-"):
                            latest_stable = stable
                            break
                    if latest_testing:
                        koji_build = koji.getBuild(build.nvr)
                        latest_build = koji.getBuild(latest_testing)
                        if rpm.labelCompare(build_evr(koji_build), build_evr(latest_build)) < 0:
                            print "%s in testing, latest_testing = %s, latest_stable = %s" % (
                                update.title,
                                latest_testing,
                                latest_stable,
                            )
                            if untag:
                                print "Obsoleting %s" % update.title
                                update.obsolete(newer=latest_testing)
Example #44
def clean_testing_builds(untag=False):
    koji = get_session()
    for release in Release.select():
        stable_builds = koji.listTagged(release.stable_tag, latest=True)
        stable_nvrs = [build['nvr'] for build in stable_builds]
        print "Fetched %d builds tagged with %s" % (len(stable_builds),
                                                    release.stable_tag)
        testing_builds = koji.listTagged(release.testing_tag, latest=True)
        print "Fetched %d builds tagged with %s" % (len(testing_builds),
                                                    release.testing_tag)
        testing_nvrs = [build['nvr'] for build in testing_builds]
        for testing_build in testing_builds:
            for build in testing_builds:
                compare_builds(testing_build, build, untag,
                               release.testing_tag)
            for build in stable_builds:
                compare_builds(testing_build, build, untag,
                               release.testing_tag)

        # Find testing updates that aren't in the list of latest builds
        for update in PackageUpdate.select(
                AND(PackageUpdate.q.releaseID == release.id,
                    PackageUpdate.q.status == 'testing',
                    PackageUpdate.q.request == None)):
            for build in update.builds:
                if build.nvr not in testing_nvrs:
                    latest_testing = None
                    latest_stable = None
                    for testing in testing_nvrs:
                        if testing.startswith(build.package.name + '-'):
                            latest_testing = testing
                            break
                    for stable in stable_nvrs:
                        if stable.startswith(build.package.name + '-'):
                            latest_stable = stable
                            break
                    if latest_testing:
                        koji_build = koji.getBuild(build.nvr)
                        latest_build = koji.getBuild(latest_testing)
                        if rpm.labelCompare(build_evr(koji_build),
                                            build_evr(latest_build)) < 0:
                            print "%s in testing, latest_testing = %s, latest_stable = %s" % (
                                update.title, latest_testing, latest_stable)
                            if untag:
                                print "Obsoleting %s" % update.title
                                update.obsolete(newer=latest_testing)
Example #45
    def send_digest_mail(self):
        '''
        Send digest mail to mailing lists
        '''
        for prefix, content in self.testing_digest.items():
            log.debug("Sending digest for updates-testing %s" % prefix)
            maildata = u''
            try:
                security_updates = self.get_security_updates(prefix)
                if security_updates:
                    maildata += u'The following %s Security updates need testing:\n\n' % prefix
                    for update in security_updates:
                        maildata += u'    %s\n' % (config.get('base_address') + url(update.get_url()))
                    maildata += '\n\n'

                critpath_updates = self.get_unapproved_critpath_updates(prefix)
                if critpath_updates:
                    maildata += u'The following %s Critical Path updates have yet to be approved:\n\n' % prefix
                    for update in self.get_unapproved_critpath_updates(prefix):
                        maildata += u'    %s\n' % (config.get('base_address') + url(update.get_url()))
                    maildata += '\n\n'
            except Exception, e:
                log.exception(e)

            maildata += u'The following builds have been pushed to %s updates-testing\n\n' % prefix
            # get a list of all NVRs
            updlist = content.keys()
            # sort the list
            updlist.sort()
            # Add the list of builds to the mail
            for pkg in updlist:
                maildata += u'    %s\n' % pkg
            # Add some space between the short list and the details
            maildata += u'\nDetails about builds:\n\n'
            # Add the detail of each build
            for nvr in updlist:
                maildata += u"\n" + self.testing_digest[prefix][nvr]
            release = Release.select(Release.q.long_name==prefix)[0]
            mail.send_mail(config.get('bodhi_email'),
                      config.get('%s_test_announce_list' %
                          release.id_prefix.lower().replace('-', '_')),
                      '%s updates-testing report' % prefix,
                      maildata)
Example #46
    def test_id(self):
        update = self.get_update()
        update.assign_id()
        assert update.updateid == '%s-%s-0001' % (update.release.id_prefix,
                                                  time.localtime()[0])
        assert update.date_pushed
        update = self.get_update(name='TurboGears-0.4.4-8.fc7')
        update.assign_id()
        assert update.updateid == '%s-%s-0002' % (update.release.id_prefix,
                                                  time.localtime()[0])

        # Create another update for another release that has the same
        # Release.id_prefix.  This used to trigger a bug that would cause
        # duplicate IDs across Fedora 10/11 updates.
        update = self.get_update(name='nethack-3.4.5-1.fc11')
        otherrel = Release(name='fc11',
                           long_name='Fedora 11',
                           id_prefix='FEDORA',
                           dist_tag='dist-fc11')
        update.release = otherrel
        update.assign_id()
        assert update.updateid == '%s-%s-0003' % (update.release.id_prefix,
                                                  time.localtime()[0])

        # 10k bug
        update.updateid = 'FEDORA-%s-9999' % YEAR
        newupdate = self.get_update(name='nethack-2.5.6-1.fc10')
        newupdate.assign_id()
        assert newupdate.updateid == 'FEDORA-%s-10000' % YEAR

        newerupdate = self.get_update(name='nethack-2.5.7-1.fc10')
        newerupdate.assign_id()
        assert newerupdate.updateid == 'FEDORA-%s-10001' % YEAR

        # test updates that were pushed at the same time.  assign_id should
        # be able to figure out which one has the highest id.
        now = datetime.utcnow()
        newupdate.date_pushed = now
        newerupdate.date_pushed = now

        newest = self.get_update(name='nethack-2.5.8-1.fc10')
        newest.assign_id()
        assert newest.updateid == 'FEDORA-%s-10002' % YEAR
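
The asserted IDs above follow a zero-padded counter after the release prefix and the push year. A small formatting sketch consistent with those assertions is shown below; it is illustrative only, and the real numbering logic lives in assign_id() on the update model, which also walks existing updates to find the next free number and handles the cases exercised by this test.

# Illustrative formatting only, consistent with the asserted IDs above
# ('FEDORA-<year>-0001' ... 'FEDORA-<year>-10000').
import time

def format_update_id(id_prefix, counter):
    return '%s-%s-%04d' % (id_prefix, time.localtime()[0], counter)

print format_update_id('FEDORA', 1)      # e.g. FEDORA-2009-0001
print format_update_id('FEDORA', 10000)  # e.g. FEDORA-2009-10000
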
Example #47
    def test_epel_id(self):
        """ Make sure we can handle id_prefixes that contain dashes. eg: FEDORA-EPEL """
        # Create a normal Fedora update first
        update = self.get_update()
        update.assign_id()
        assert update.updateid == 'FEDORA-%s-0001' % time.localtime()[0]

        update = self.get_update(name='TurboGears-2.1-1.el5')
        release = Release(name='EL-5', long_name='Fedora EPEL 5',
                          dist_tag='dist-5E-epel', id_prefix='FEDORA-EPEL')
        update.release = release
        update.assign_id()
        assert update.updateid == 'FEDORA-EPEL-%s-0001' % time.localtime()[0]

        update = self.get_update(name='TurboGears-2.2-1.el5')
        update.release = release
        update.assign_id()
        assert update.updateid == '%s-%s-0002' % (release.id_prefix,
                                                  time.localtime()[0]), update.updateid
Example #48
def main():
    load_config()
    print "Calculating F11 0day update metrics..."
    updates = {'bugfix': [], 'security': [], 'enhancement': [], 'newpackage': []}
    date = datetime(*time.strptime('06-09-2009', '%m-%d-%Y')[:-2])
    f11 = Release.byName('F11')
    for update in PackageUpdate.select(PackageUpdate.q.releaseID==f11.id):
        for comment in update.comments:
            if comment.author == 'bodhi' and comment.timestamp < date and \
               comment.text.startswith('This update has been pushed to stable'):
                updates[update.type].append(update.title)
                break

    pprint(updates)
    print '=' * 80
    print 'F11 0day stats'
    print ' * %d security' % len(updates['security'])
    print ' * %d bugfixes' % len(updates['bugfix'])
    print ' * %d enhancements' % len(updates['enhancement'])
    print ' * %d newpackage' % len(updates['newpackage'])
Example #49
def main():
    unstable = subprocess.Popen('grep "\[Fedora Update\] \[unstable\]" bodhi.logs',
                                stdout=subprocess.PIPE, shell=True)
    out, err = unstable.communicate()
    (unstable_updates, unstable_critpath, unstable_deltas,
     unstable_accum, unstable_occur) = parse_output(out)

    stable = subprocess.Popen('grep "\[Fedora Update\] \[stablekarma\]" bodhi.logs',
                              stdout=subprocess.PIPE, shell=True)
    out, err = stable.communicate()
    (stable_updates, stable_critpath, stable_deltas,
     stable_accum, stable_occur) = parse_output(out)

    for release in Release.select():
        print '\n' + header(release.long_name)
        num_updates = PackageUpdate.select(
                PackageUpdate.q.releaseID==release.id).count()
        num_stable = len(stable_updates[release.name])
        num_unstable = len(unstable_updates[release.name])
        num_testing = len(unstable_deltas) + len(stable_deltas)
        print " * %d updates automatically unpushed due to karma (%0.2f%%)" % (
                num_unstable, float(num_unstable) / num_updates * 100)
        print "   * %d of which were critical path updates" % (
                unstable_critpath[release.name])
        print " * %d updates automatically pushed due to karma (%0.2f%%)" % (
                num_stable, float(num_stable) / num_updates * 100)
        print "   * %d of which were critical path updates" % (
                stable_critpath[release.name])

        print " * Time spent in testing of updates that were pushed by karma:"
        print "   * mean = %d days" % (stable_accum.days / len(stable_deltas))
        print "   * median = %d days" % stable_deltas[len(stable_deltas)/2].days
        print "   * mode = %d days" % sorted(stable_occur.items(),
                                             key=itemgetter(1))[-1][0]

        print " * Time spent in testing of updates that were unpushed by karma:"
        print "   * mean = %d days" % (unstable_accum.days / len(unstable_deltas))
        print "   * median = %d days" % unstable_deltas[len(unstable_deltas)/2].days
        print "   * mode = %d days" % sorted(unstable_occur.items(),
                                             key=itemgetter(1))[-1][0]
Example #50
def clean_pending_tags():
    """ Clean up any stray pending tags """
    koji = get_session()
    for release in Release.select():
        log.info("Finding all stray pending-testing builds...")
        if release.name.startswith('E'):
            continue

        tag = release.pending_testing_tag
        tagged = [build['nvr'] for build in koji.listTagged(tag)]
        for nvr in tagged:
            try:
                build = PackageBuild.byNvr(nvr)
                for update in build.updates:
                    if update.status in ('testing', 'stable', 'obsolete'):
                        log.info("%s %s" % (nvr, update.status))
                        log.info("Untagging %s" % nvr)
                        koji.untagBuild(tag, nvr, force=True)
            except SQLObjectNotFound:
                log.info("Can't find build for %s" % nvr)
                log.info("Untagging %s" % nvr)
                koji.untagBuild(tag, nvr, force=True)

        log.info("Finding all stray pending-stable builds...")
        tag = release.pending_stable_tag
        tagged = [build['nvr'] for build in koji.listTagged(tag)]
        for nvr in tagged:
            try:
                build = PackageBuild.byNvr(nvr)
                for update in build.updates:
                    if update.status in ('pending', 'obsolete', 'stable'):
                        log.info("%s %s" % (nvr, update.status))
                        log.info("Untagging %s" % nvr)
                        koji.untagBuild(tag, nvr, force=True)
            except SQLObjectNotFound:
                log.info("Untagging %s" % nvr)
                koji.untagBuild(tag, nvr, force=True)
Example #51
0
def clean_pending_tags():
    """ Clean up any stray pending tags """
    koji = get_session()
    for release in Release.select():
        log.info("Finding all stray pending-testing builds...")
        if release.name.startswith('EL'):
            continue

        tag = release.pending_testing_tag
        tagged = [build['nvr'] for build in koji.listTagged(tag)]
        for nvr in tagged:
            try:
                build = PackageBuild.byNvr(nvr)
                for update in build.updates:
                    if update.status in ('testing', 'stable', 'obsolete'):
                        log.info("%s %s" % (nvr, update.status))
                        log.info("Untagging %s" % nvr)
                        koji.untagBuild(tag, nvr, force=True)
            except SQLObjectNotFound:
                log.info("Can't find build for %s" % nvr)
                log.info("Untagging %s" % nvr)
                koji.untagBuild(tag, nvr, force=True)

        log.info("Finding all stray pending-stable builds...")
        tag = release.pending_stable_tag
        tagged = [build['nvr'] for build in koji.listTagged(tag)]
        for nvr in tagged:
            try:
                build = PackageBuild.byNvr(nvr)
                for update in build.updates:
                    if update.status in ('pending', 'obsolete', 'stable'):
                        log.info("%s %s" % (nvr, update.status))
                        log.info("Untagging %s" % nvr)
                        koji.untagBuild(tag, nvr, force=True)
            except SQLObjectNotFound:
                log.info("Untagging %s" % nvr)
                koji.untagBuild(tag, nvr, force=True)
Example #52
0
        for build in iterate(builds):
            release = None
            n, v, r = get_nvr(build)

            # Make sure the build is tagged correctly
            try:
                tags = [tag["name"] for tag in koji.listTags(build)]
            except Exception, e:
                flash(str(e))
                if request_format() == "json":
                    return dict()
                raise redirect("/override/new")

            # Determine the release by the tag, and sanity check the builds
            for tag in tags:
                for rel in Release.select():
                    if tag in (rel.candidate_tag, rel.testing_tag):
                        release = last_release = rel
                    elif tag == rel.stable_tag:
                        flash("Error: %s is already tagged with %s" % (build, tag))
                        if request_format() == "json":
                            return dict()
                        raise redirect("/override/new")

            if not release:
                flash("Error: Could not determine release for %s with tags %s" % (build, map(str, tags)))
                if request_format() == "json":
                    return dict()
                raise redirect("/override/new")

            # Make sure the user has commit rights to the appropriate branch
Example #53
0
def main():
    load_config()
    stats = {}  # {release: {'stat': ...}}
    feedback = 0  # total number of updates that received feedback
    karma = defaultdict(int)  # {username: # of karma submissions}
    num_updates = PackageUpdate.select().count()
    proventesters = set()

    for release in Release.select():
        print header(release.long_name)
        updates = PackageUpdate.select(PackageUpdate.q.releaseID == release.id)
        stats[release.name] = {
            'num_updates': updates.count(),
            'num_tested': 0,
            'num_tested_without_karma': 0,
            'num_feedback': 0,
            'num_anon_feedback': 0,
            'num_critpath': 0,
            'num_critpath_approved': 0,
            'num_critpath_unapproved': 0,
            'num_stablekarma': 0,
            'num_testingtime': 0,
            'critpath_without_karma': set(),
            'conflicted_proventesters': [],
            'critpath_positive_karma_including_proventesters': [],
            'critpath_positive_karma_negative_proventesters': [],
            'stable_with_negative_karma': PackageUpdate.select(
                AND(PackageUpdate.q.releaseID == release.id,
                    PackageUpdate.q.status == 'stable',
                    PackageUpdate.q.karma < 0)).count(),
            'bugs': set(),
            'karma': defaultdict(int),
            'deltas': [],
            'occurrences': {},
            'accumulative': timedelta(),
            'packages': defaultdict(int),
            'proventesters': set(),
            'proventesters_1': 0,
            'proventesters_0': 0,
            'proventesters_-1': 0,
            # for tracking number of types of karma
            '1': 0,
            '0': 0,
            '-1': 0,
        }
        data = stats[release.name]

        for status in statuses:
            data['num_%s' % status] = PackageUpdate.select(
                AND(PackageUpdate.q.releaseID == release.id,
                    PackageUpdate.q.status == status)).count()

        for type in types:
            data['num_%s' % type] = PackageUpdate.select(
                AND(PackageUpdate.q.releaseID == release.id,
                    PackageUpdate.q.type == type)).count()

        for update in release.updates:
            for build in update.builds:
                data['packages'][build.package] += 1
            for bug in update.bugs:
                data['bugs'].add(bug.bz_id)

            feedback_done = False
            testingtime_done = False

            for comment in update.comments:
                if comment.author_name in ('autoqa', 'taskotron'):
                    continue

                # Track the # of +1's, -1's, and +0's.
                if comment.author_name != 'bodhi':
                    data[str(comment.karma)] += 1

                if comment.author_group == 'proventesters':
                    data['proventesters'].add(comment.author_name)
                    data['proventesters_%d' % comment.karma] += 1

                if comment.text == 'This update has reached the stable karma threshold and will be pushed to the stable updates repository':
                    data['num_stablekarma'] += 1
                elif comment.text and comment.text.endswith(
                        'days in testing and can be pushed to stable now if the maintainer wishes'
                ):
                    data['num_testingtime'] += 1

                # For figuring out if an update has received feedback or not
                if not feedback_done:
                    if (not comment.author.startswith('bodhi')
                            and comment.karma != 0 and not comment.anonymous):
                        data['num_feedback'] += 1  # per-release tracking of feedback
                        feedback += 1  # total number of updates that have received feedback
                        feedback_done = True  # so we don't run this for each comment

                # Tracking per-author karma & anonymous feedback
                if not comment.author.startswith('bodhi'):
                    if comment.anonymous:
                        # @@: should we track anon +0 comments as "feedback"?
                        if comment.karma != 0:
                            data['num_anon_feedback'] += 1
                    else:
                        author = comment.author_name
                        data['karma'][author] += 1
                        karma[author] += 1

                if (not testingtime_done and comment.text
                        == 'This update has been pushed to testing'):
                    for othercomment in update.comments:
                        if othercomment.text == 'This update has been pushed to stable':
                            delta = othercomment.timestamp - comment.timestamp
                            data['deltas'].append(delta)
                            data['occurrences'][delta.days] = \
                                data['occurrences'].setdefault(
                                        delta.days, 0) + 1
                            data['accumulative'] += delta
                            testingtime_done = True
                            break

            if update.critpath:
                if update.critpath_approved or update.status == 'stable':
                    data['num_critpath_approved'] += 1
                else:
                    if update.status in ('testing', 'pending'):
                        data['num_critpath_unapproved'] += 1
                data['num_critpath'] += 1
                #if not feedback_done:
                if update.status == 'stable' and update.karma == 0:
                    data['critpath_without_karma'].add(update)

                # Proventester metrics
                proventester_karma = defaultdict(int)  # {username: karma}
                positive_proventesters = 0
                negative_proventesters = 0
                for comment in update.comments:
                    if comment.author_group == 'proventesters':
                        proventester_karma[comment.author_name] += comment.karma
                for _karma in proventester_karma.values():
                    if _karma > 0:
                        positive_proventesters += 1
                    elif _karma < 0:
                        negative_proventesters += 1

                # Conflicting proventesters
                if positive_proventesters and negative_proventesters:
                    data['conflicted_proventesters'] += [short_url(update)]

                # Track updates with overall positive karma, including positive
                # karma from a proventester
                if update.karma > 0 and positive_proventesters:
                    data['critpath_positive_karma_including_proventesters'] += [short_url(update)]

                # Track updates with overall positive karma, including negative
                # karma from a proventester
                if update.karma > 0 and negative_proventesters:
                    data['critpath_positive_karma_negative_proventesters'] += [short_url(update)]

            if testingtime_done:
                data['num_tested'] += 1
                if not feedback_done:
                    data['num_tested_without_karma'] += 1

        data['deltas'].sort()
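        # sorted so the middle element printed below approximates the median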

        print " * %d updates" % data['num_updates']
        print " * %d packages updated" % (len(data['packages']))
        for status in statuses:
            print " * %d %s updates" % (data['num_%s' % status], status)
        for type in types:
            print " * %d %s updates (%0.2f%%)" % (
                data['num_%s' % type], type,
                float(data['num_%s' % type]) / data['num_updates'] * 100)
        print " * %d bugs resolved" % len(data['bugs'])
        print " * %d critical path updates (%0.2f%%)" % (
            data['num_critpath'],
            float(data['num_critpath']) / data['num_updates'] * 100)
        print " * %d approved critical path updates" % (
            data['num_critpath_approved'])
        print " * %d unapproved critical path updates" % (
            data['num_critpath_unapproved'])
        print " * %d updates received feedback (%0.2f%%)" % (
            data['num_feedback'],
            (float(data['num_feedback']) / data['num_updates'] * 100))
        print " * %d +0 comments" % data['0']
        print " * %d +1 comments" % data['1']
        print " * %d -1 comments" % data['-1']
        print " * %d unique authenticated karma submitters" % (len(
            data['karma']))
        print " * %d proventesters" % len(data['proventesters'])
        print "   * %d +1's from proventesters" % data['proventesters_1']
        print "   * %d -1's from proventesters" % data['proventesters_-1']
        if data['num_critpath']:
            print " * %d critpath updates with conflicting proventesters (%0.2f%% of critpath)" % (
                len(data['conflicted_proventesters']),
                float(len(data['conflicted_proventesters'])) /
                data['num_critpath'] * 100)
            for u in data['conflicted_proventesters']:
                print "   * " + u
            print " * %d critpath updates with positive karma and negative proventester feedback (%0.2f%% of critpath)" % (
                len(data['critpath_positive_karma_negative_proventesters']),
                float(
                    len(data['critpath_positive_karma_negative_proventesters'])
                ) / data['num_critpath'] * 100)
            for u in data['critpath_positive_karma_negative_proventesters']:
                print "   * " + u
            print " * %d critpath updates with positive karma and positive proventester feedback (%0.2f%% of critpath)" % (
                len(data['critpath_positive_karma_including_proventesters']),
                float(
                    len(data['critpath_positive_karma_including_proventesters']
                        )) / data['num_critpath'] * 100)
        print " * %d anonymous users gave feedback (%0.2f%%)" % (
            data['num_anon_feedback'], float(data['num_anon_feedback']) /
            (data['num_anon_feedback'] + sum(data['karma'].values())) * 100)
        # This does not take into account updates that reach stablekarma before being pushed to testing!
        #        print " * %d out of %d stable updates went through testing (%0.2f%%)" %(
        #                data['num_tested'], data['num_stable'],
        #                float(data['num_tested']) / data['num_stable'] * 100)
        print " * %d updates reached the stable karma threshold (%0.2f%%)" % (
            data['num_stablekarma'],
            float(data['num_stablekarma']) / data['num_stable'] * 100)
        print " * %d updates reached the minimum time in testing threshold (%0.2f%%)" % (
            data['num_testingtime'],
            float(data['num_testingtime']) / data['num_stable'] * 100)
        print " * %d went from testing to stable *without* karma (%0.2f%%)" % (
            data['num_tested_without_karma'],
            float(data['num_tested_without_karma']) / data['num_tested'] * 100)
        print " * %d updates were pushed to stable with negative karma (%0.2f%%)" % (
            data['stable_with_negative_karma'],
            float(data['stable_with_negative_karma']) / data['num_stable'] *
            100)
        print " * %d critical path updates pushed to stable *without* karma" % (
            len(data['critpath_without_karma']))
        #for update in data['critpath_without_karma']:
        #    print "   * %s submitted by %s" % (update.title, update.submitter)
        print " * Time spent in testing:"
        print "   * mean = %d days" % (data['accumulative'].days /
                                       len(data['deltas']))
        print "   * median = %d days" % (data['deltas'][len(data['deltas']) /
                                                        2].days)
        print "   * mode = %d days" % (sorted(data['occurrences'].items(),
                                              key=itemgetter(1))[-1][0])
        #for package in sorted(data['packages'].items(), key=itemgetter(1), reverse=True):
        #    print "    * %s: %d" % (package[0].name, package[1])
        print

    print
    print "Out of %d total updates, %d received feedback (%0.2f%%)" % (
        num_updates, feedback, (float(feedback) / num_updates * 100))
    print "Out of %d total unique commenters, the top 50 are:" % (len(karma))
    for submitter in sorted(karma.iteritems(), key=itemgetter(1),
                            reverse=True)[:50]:
        print " * %s (%d)" % (submitter[0], submitter[1])
Example #54
0
File: rss.py Project: tyll/bodhi
    def get_feed_data(self,
                      release=None,
                      type=None,
                      status=None,
                      comments=False,
                      submitter=None,
                      builds=None,
                      user=None,
                      package=None,
                      critpath=False,
                      unapproved=None,
                      *args,
                      **kw):
        query = []
        entries = []
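        # Entries are dated and ordered by push date by default; pending
        # updates switch to the submission date below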
        date = lambda update: update.date_pushed
        order = PackageUpdate.q.date_pushed
        title = []
        critpath = critpath in (True, 'True', 'true')
        unapproved = unapproved in (True, 'True', 'true')

        if critpath:
            return self.get_critpath_updates(release=release,
                                             unapproved=unapproved)
        if comments:
            return self.get_latest_comments(user=user)
        if package:
            return self.get_package_updates(package, release)
        if release:
            try:
                rel = Release.byName(release.upper())
            except SQLObjectNotFound:
                return dict(title='%s not found' % release, entries=[])
            query.append(PackageUpdate.q.releaseID == rel.id)
            title.append(rel.long_name)
        if type:
            query.append(PackageUpdate.q.type == type)
            title.append(type.title())
        if status:
            query.append(PackageUpdate.q.status == status)
            if status == 'pending':
                date = lambda update: update.date_submitted
                order = PackageUpdate.q.date_submitted
            else:
                # Let's only show pushed testing/stable updates
                query.append(PackageUpdate.q.pushed == True)
            title.append(status.title())
        else:
            query.append(PackageUpdate.q.pushed == True)

        if submitter:
            query.append(PackageUpdate.q.submitter == submitter)
            title.append("submitted by %s" % submitter)

        if builds:
            query.append(PackageUpdate.q.builds == builds)
            title.append("for %s" % builds)

        updates = PackageUpdate.select(AND(*query), orderBy=order).reversed()

        for update in updates:
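            # Walk newest-first; stop once an update falls outside the configured
            # feeds.num_days_to_show window and we already have feeds.max_entries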
            delta = datetime.utcnow() - update.date_submitted
            if delta and delta.days > config.get('feeds.num_days_to_show'):
                if len(entries) >= config.get('feeds.max_entries'):
                    break
            entries.append({
                'id': config.get('base_address') + url(update.get_url()),
                'summary': update.notes,
                'published': date(update),
                'link': config.get('base_address') + url(update.get_url()),
                'title': "%s %sUpdate: %s" % (
                    update.release.long_name,
                    update.type == 'security' and 'Security ' or '',
                    update.title)
            })
            if len(update.bugs):
                bugs = "<b>Resolved Bugs</b><br/>"
                for bug in update.bugs:
                    bugs += "<a href=%s>%d</a> - %s<br/>" % (
                        bug.get_url(), bug.bz_id, bug.title)
                entries[-1]['summary'] = "%s<br/>%s" % (bugs[:-2],
                                                        entries[-1]['summary'])

        title.append('Updates')

        return dict(title=' '.join(title),
                    subtitle="",
                    link=config.get('base_address') + url('/'),
                    entries=entries)
Example #55
0
def main():
    load_config()
    stats = {}  # {release: {'stat': ...}}
    feedback = 0  # total number of updates that received feedback
    karma = defaultdict(int)  # {username: # of karma submissions}
    num_updates = PackageUpdate.select().count()
    proventesters = set()

    for release in Release.select():
        print header(release.long_name)
        updates = PackageUpdate.select(PackageUpdate.q.releaseID==release.id)
        stats[release.name] = {
                'num_updates': updates.count(),
                'num_tested': 0,
                'num_tested_without_karma': 0,
                'num_feedback': 0,
                'num_anon_feedback': 0,
                'num_critpath': 0,
                'num_critpath_approved': 0,
                'num_critpath_unapproved': 0,
                'num_stablekarma': 0,
                'num_testingtime': 0,
                'critpath_without_karma': set(),
                'conflicted_proventesters': [],
                'critpath_positive_karma_including_proventesters': [],
                'critpath_positive_karma_negative_proventesters': [],
                'stable_with_negative_karma': PackageUpdate.select(
                    AND(PackageUpdate.q.releaseID==release.id,
                        PackageUpdate.q.status=='stable',
                        PackageUpdate.q.karma < 0)).count(),
                'bugs': set(),
                'karma': defaultdict(int),
                'deltas': [],
                'occurrences': {},
                'accumulative': timedelta(),
                'packages': defaultdict(int),
                'proventesters': set(),
                'proventesters_1': 0,
                'proventesters_0': 0,
                'proventesters_-1': 0,
                # for tracking number of types of karma
                '1': 0,
                '0': 0,
                '-1': 0,
                }
        data = stats[release.name]

        for status in statuses:
            data['num_%s' % status] = PackageUpdate.select(AND(
                PackageUpdate.q.releaseID==release.id,
                PackageUpdate.q.status==status)).count()

        for type in types:
            data['num_%s' % type] = PackageUpdate.select(AND(
                PackageUpdate.q.releaseID==release.id,
                PackageUpdate.q.type==type)).count()

        for update in release.updates:
            for build in update.builds:
                data['packages'][build.package] += 1
            for bug in update.bugs:
                data['bugs'].add(bug.bz_id)

            feedback_done = False
            testingtime_done = False

            for comment in update.comments:
                if comment.author_name in ('autoqa', 'taskotron'):
                    continue

                # Track the # of +1's, -1's, and +0's.
                if comment.author_name != 'bodhi':
                    data[str(comment.karma)] += 1

                if comment.author_group == 'proventesters':
                    data['proventesters'].add(comment.author_name)
                    data['proventesters_%d' % comment.karma] += 1

                if comment.text == 'This update has reached the stable karma threshold and will be pushed to the stable updates repository':
                    data['num_stablekarma'] += 1
                elif comment.text and comment.text.endswith('days in testing and can be pushed to stable now if the maintainer wishes'):
                    data['num_testingtime'] += 1

                # For figuring out if an update has received feedback or not
                if not feedback_done:
                    if (not comment.author.startswith('bodhi') and
                            comment.karma != 0 and not comment.anonymous):
                        data['num_feedback'] += 1  # per-release tracking of feedback
                        feedback += 1  # total number of updates that have received feedback
                        feedback_done = True  # so we don't run this for each comment

                # Tracking per-author karma & anonymous feedback
                if not comment.author.startswith('bodhi'):
                    if comment.anonymous:
                        # @@: should we track anon +0 comments as "feedback"?
                        if comment.karma != 0:
                            data['num_anon_feedback'] += 1
                    else:
                        author = comment.author_name
                        data['karma'][author] += 1
                        karma[author] += 1

                if (not testingtime_done and
                        comment.text == 'This update has been pushed to testing'):
                    for othercomment in update.comments:
                        if othercomment.text == 'This update has been pushed to stable':
                            delta = othercomment.timestamp - comment.timestamp
                            data['deltas'].append(delta)
                            data['occurrences'][delta.days] = \
                                data['occurrences'].setdefault(
                                        delta.days, 0) + 1
                            data['accumulative'] += delta
                            testingtime_done = True
                            break

            if update.critpath:
                if update.critpath_approved or update.status == 'stable':
                    data['num_critpath_approved'] += 1
                else:
                    if update.status in ('testing', 'pending'):
                        data['num_critpath_unapproved'] += 1
                data['num_critpath'] += 1
                #if not feedback_done:
                if update.status == 'stable' and update.karma == 0:
                    data['critpath_without_karma'].add(update)

                # Proventester metrics
                proventester_karma = defaultdict(int)  # {username: karma}
                positive_proventesters = 0
                negative_proventesters = 0
                for comment in update.comments:
                    if comment.author_group == 'proventesters':
                        proventester_karma[comment.author_name] += comment.karma
                for _karma in proventester_karma.values():
                    if _karma > 0:
                        positive_proventesters += 1
                    elif _karma < 0:
                        negative_proventesters += 1

                # Conflicting proventesters
                if positive_proventesters and negative_proventesters:
                    data['conflicted_proventesters'] += [short_url(update)]

                # Track updates with overall positive karma, including positive
                # karma from a proventester
                if update.karma > 0 and positive_proventesters:
                    data['critpath_positive_karma_including_proventesters'] += [short_url(update)]

                # Track updates with overall positive karma, including negative
                # karma from a proventester
                if update.karma > 0 and negative_proventesters:
                    data['critpath_positive_karma_negative_proventesters'] += [short_url(update)]

            if testingtime_done:
                data['num_tested'] += 1
                if not feedback_done:
                    data['num_tested_without_karma'] += 1

        data['deltas'].sort()

        print " * %d updates" % data['num_updates']
        print " * %d packages updated" % (len(data['packages']))
        for status in statuses:
            print " * %d %s updates" % (data['num_%s' % status], status)
        for type in types:
            print " * %d %s updates (%0.2f%%)" % (data['num_%s' % type], type,
                    float(data['num_%s' % type]) / data['num_updates'] * 100)
        print " * %d bugs resolved" % len(data['bugs'])
        print " * %d critical path updates (%0.2f%%)" % (data['num_critpath'],
                float(data['num_critpath']) / data['num_updates'] * 100)
        print " * %d approved critical path updates" % (
                data['num_critpath_approved'])
        print " * %d unapproved critical path updates" % (
                data['num_critpath_unapproved'])
        print " * %d updates received feedback (%0.2f%%)" % (
                data['num_feedback'], (float(data['num_feedback']) /
                 data['num_updates'] * 100))
        print " * %d +0 comments" % data['0']
        print " * %d +1 comments" % data['1']
        print " * %d -1 comments" % data['-1']
        print " * %d unique authenticated karma submitters" % (
                len(data['karma']))
        print " * %d proventesters" % len(data['proventesters'])
        print "   * %d +1's from proventesters" % data['proventesters_1']
        print "   * %d -1's from proventesters" % data['proventesters_-1']
        if data['num_critpath']:
            print " * %d critpath updates with conflicting proventesters (%0.2f%% of critpath)" % (len(data['conflicted_proventesters']), float(len(data['conflicted_proventesters'])) / data['num_critpath'] * 100)
            for u in data['conflicted_proventesters']:
                print "   * " + u
            print " * %d critpath updates with positive karma and negative proventester feedback (%0.2f%% of critpath)" % (len(data['critpath_positive_karma_negative_proventesters']), float(len(data['critpath_positive_karma_negative_proventesters'])) / data['num_critpath'] * 100)
            for u in data['critpath_positive_karma_negative_proventesters']:
                print "   * " + u
            print " * %d critpath updates with positive karma and positive proventester feedback (%0.2f%% of critpath)" % (len(data['critpath_positive_karma_including_proventesters']), float(len(data['critpath_positive_karma_including_proventesters'])) / data['num_critpath'] * 100)
        print " * %d anonymous users gave feedback (%0.2f%%)" % (
                data['num_anon_feedback'], float(data['num_anon_feedback']) /
                (data['num_anon_feedback'] + sum(data['karma'].values())) * 100)
# This does not take into account updates that reach stablekarma before being pushed to testing!
#        print " * %d out of %d stable updates went through testing (%0.2f%%)" %(
#                data['num_tested'], data['num_stable'],
#                float(data['num_tested']) / data['num_stable'] * 100)
        print " * %d updates reached the stable karma threshold (%0.2f%%)" % (
                data['num_stablekarma'],
                float(data['num_stablekarma']) / data['num_stable'] * 100)
        print " * %d updates reached the minimum time in testing threshold (%0.2f%%)" % (
                data['num_testingtime'],
                float(data['num_testingtime']) / data['num_stable'] * 100)
        print " * %d went from testing to stable *without* karma (%0.2f%%)" % (
                data['num_tested_without_karma'],
                float(data['num_tested_without_karma']) /
                data['num_tested'] * 100)
        print " * %d updates were pushed to stable with negative karma (%0.2f%%)" % (
                data['stable_with_negative_karma'], float(data['stable_with_negative_karma']) / data['num_stable'] * 100)
        print " * %d critical path updates pushed to stable *without* karma" % (
                len(data['critpath_without_karma']))
        #for update in data['critpath_without_karma']:
        #    print "   * %s submitted by %s" % (update.title, update.submitter)
        print " * Time spent in testing:"
        print "   * mean = %d days" % (data['accumulative'].days /
                len(data['deltas']))
        print "   * median = %d days" % (
                data['deltas'][len(data['deltas']) / 2].days)
        print "   * mode = %d days" % (
                sorted(data['occurrences'].items(), key=itemgetter(1))[-1][0])
        #for package in sorted(data['packages'].items(), key=itemgetter(1), reverse=True):
        #    print "    * %s: %d" % (package[0].name, package[1])
        print

    print
    print "Out of %d total updates, %d received feedback (%0.2f%%)" % (
            num_updates, feedback, (float(feedback) / num_updates * 100))
    print "Out of %d total unique commenters, the top 50 are:" % (
            len(karma))
    for submitter in sorted(karma.iteritems(), key=itemgetter(1), reverse=True)[:50]:
        print " * %s (%d)" % (submitter[0], submitter[1])
Example #56
0
    def get_feed_data(self, release=None, type=None, status=None,
                      comments=False, submitter=None, builds=None, 
                      user=None, package=None, critpath=False,
                      unapproved=None, *args, **kw):
        query = []
        entries = []
        date = lambda update: update.date_pushed
        order = PackageUpdate.q.date_pushed
        title = []
        critpath = critpath in (True, 'True', 'true')
        unapproved = unapproved in (True, 'True', 'true')

        if critpath:
            return self.get_critpath_updates(release=release,
                                             unapproved=unapproved)
        if comments:
            return self.get_latest_comments(user=user)
        if package:
            return self.get_package_updates(package, release)
        if release:
            try:
                rel = Release.byName(release.upper())
            except SQLObjectNotFound:
                return dict(title = '%s not found' % release, entries=[])
            query.append(PackageUpdate.q.releaseID == rel.id)
            title.append(rel.long_name)
        if type:
            query.append(PackageUpdate.q.type == type)
            title.append(type.title())
        if status:
            query.append(PackageUpdate.q.status == status)
            if status == 'pending':
                date = lambda update: update.date_submitted
                order = PackageUpdate.q.date_submitted
            else:
                # Let's only show pushed testing/stable updates
                query.append(PackageUpdate.q.pushed == True)
            title.append(status.title())
        else:
            query.append(PackageUpdate.q.pushed == True)

        if submitter:
            query.append(PackageUpdate.q.submitter == submitter)
            title.append("submitted by %s" % submitter)

        if builds:
            query.append(PackageUpdate.q.builds == builds)
            title.append("for %s" % builds)

        updates = PackageUpdate.select(AND(*query), orderBy=order).reversed()

        for update in updates:
            delta = datetime.utcnow() - update.date_submitted
            if delta and delta.days > config.get('feeds.num_days_to_show'):
                if len(entries) >= config.get('feeds.max_entries'):
                    break
            entries.append({
                'id'        : config.get('base_address') + url(update.get_url()),
                'summary'   : update.notes,
                'published' : date(update),
                'link'      : config.get('base_address') + url(update.get_url()),
                'title'     : "%s %sUpdate: %s" % (update.release.long_name,
                                                   update.type == 'security'
                                                   and 'Security ' or '',
                                                   update.title)
            })
            if len(update.bugs):
                bugs = "<b>Resolved Bugs</b><br/>"
                for bug in update.bugs:
                    bugs += "<a href=%s>%d</a> - %s<br/>" % (bug.get_url(),
                                                             bug.bz_id, bug.title)
                entries[-1]['summary'] = "%s<br/>%s" % (bugs[:-2],
                                                        entries[-1]['summary'])

        title.append('Updates')

        return dict(
                title = ' '.join(title),
                subtitle = "",
                link = config.get('base_address') + url('/'),
                entries = entries
        )