def clean_repo():
    """ Clean up our mashed_dir, removing all referenced repositories """
    log.info("Starting clean_repo job")
    mashed_dir = config.get('mashed_dir')

    # Bail out right away if any release is currently being mashed.
    mash_locks = set()
    for release in Release.select():
        lock_path = join(mashed_dir, 'MASHING-%s' % release.id_prefix)
        mash_locks.add(lock_path)
        if exists(lock_path):
            log.info("Mash in progress. Aborting clean_repo job")
            return

    # Resolve the real directories backing every live updates repo.
    live_repos = []
    for rel_name in [rel.name.lower() for rel in Release.select()]:
        # TODO: keep the 2 most recent repos!
        for repo_name in (rel_name + '-updates', rel_name + '-updates-testing'):
            live_repos.append(dirname(realpath(join(mashed_dir, repo_name))))

    # Remove every repository directory that no live symlink points at.
    for candidate in [join(mashed_dir, name) for name in os.listdir(mashed_dir)]:
        if 'repodata' in candidate:
            # skip our repodata caches
            continue
        if not islink(candidate) and isdir(candidate):
            fullpath = realpath(candidate)
            if fullpath not in live_repos:
                log.info("Removing %s" % fullpath)
                subprocess.call(['rm', '-fr', fullpath])

    # Bail out if a push started in the middle of this job
    for lock_path in mash_locks:
        if exists(lock_path):
            log.warning('Mash lock detected! Stopping clean_repo job.')
            return

    log.info("clean_repo complete!")
def index(self, release=None):
    """
    Display update metrics for one release (or the first available one).

    /updates/metrics?tg_format=json API

    :param release: optional release name; defaults to the first release.
    :returns: for JSON requests, a {release name: metrics} dict; otherwise a
              template dict with the metric widgets for the release.
    """
    if request_format() == 'json':
        # Renamed the local from `json` so it no longer shadows the stdlib
        # module, and replaced the fragile `cond and a or b` idiom with an
        # explicit conditional (behavior unchanged: the single-element list
        # is always truthy).
        results = {}
        query = [Release.byName(release)] if release else Release.select()
        for release in query:
            results[release.name] = release.metrics
        return results
    try:
        if not release:
            rel = Release.select()[0]
            release = rel.name
        else:
            rel = Release.byName(release)
    except SQLObjectNotFound:
        flash("Unknown Release")
        raise redirect('/metrics')
    widgets = MetricData().get_widgets(release)
    if not widgets:
        return dict(metrics=[], title="Metrics currently unavailable")
    return dict(metrics=[widgets[name.__name__] for name in metrics
                         if name.__name__ in widgets],
                title="%s Update Metrics" % rel.long_name)
def index(self, release=None):
    """ /updates/metrics?tg_format=json API """
    if request_format() == "json":
        # JSON consumers get a mapping of release name -> raw metrics.
        json = {}
        if release:
            query = [Release.byName(release)]
        else:
            query = Release.select()
        for release in query:
            json[release.name] = release.metrics
        return json
    try:
        if release:
            rel = Release.byName(release)
        else:
            rel = Release.select()[0]
            release = rel.name
    except SQLObjectNotFound:
        flash("Unknown Release")
        raise redirect("/metrics")
    widgets = MetricData().get_widgets(release)
    if not widgets:
        return dict(metrics=[], title="Metrics currently unavailable")
    # Only render the widgets that were actually generated for this release.
    rendered = [widgets[m.__name__] for m in metrics if m.__name__ in widgets]
    return dict(metrics=rendered, title="%s Update Metrics" % rel.long_name)
def save_db():
    """
    Pickle every release, update, and buildroot override to a dated file.

    Writes `bodhi-pickledb-YYMMDD.HHMM` in the current directory.
    """
    # Save each release and its metrics
    releases = []
    for release in Release.select():
        rel = {}
        for attr in ('name', 'long_name', 'id_prefix', 'dist_tag',
                     'locked', 'metrics'):
            rel[attr] = getattr(release, attr)
        releases.append(rel)

    # Save every update
    updates = []
    all_updates = PackageUpdate.select()
    progress = ProgressBar(maxValue=all_updates.count())
    for update in all_updates:
        data = {}
        data['title'] = update.title
        data['builds'] = [(build.package.name, build.nvr)
                          for build in update.builds]
        data['date_submitted'] = update.date_submitted
        data['date_pushed'] = update.date_pushed
        data['date_modified'] = update.date_modified
        data['release'] = [update.release.name, update.release.long_name,
                           update.release.id_prefix, update.release.dist_tag]
        data['submitter'] = update.submitter
        # Older schemas expose `updateid` instead of `update_id`
        data['update_id'] = hasattr(update, 'update_id') and \
            update.update_id or update.updateid
        data['type'] = update.type
        data['karma'] = update.karma
        data['cves'] = [cve.cve_id for cve in update.cves]
        data['bugs'] = []
        for bug in update.bugs:
            data['bugs'].append([bug.bz_id, bug.title, bug.security])
            if hasattr(bug, 'parent'):
                data['bugs'][-1].append(bug.parent)
            else:
                data['bugs'][-1].append(False)
        data['status'] = update.status
        data['pushed'] = update.pushed
        data['notes'] = update.notes
        data['request'] = update.request
        data['comments'] = [(c.timestamp, c.author, c.text, c.karma,
                             c.anonymous) for c in update.comments]
        if hasattr(update, 'approved'):
            data['approved'] = update.approved
        else:
            data['approved'] = None
        updates.append(data)
        progress()

    # Save all buildroot overrides
    overrides = []
    for override in BuildRootOverride.select():
        try:
            overrides.append(override.__json__())
        except Exception:
            # Was a bare `except:` -- narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed while still cleaning up broken rows.
            print("Removing stray override: %s" % override)
            override.destroySelf()

    dump = file('bodhi-pickledb-%s' % time.strftime("%y%m%d.%H%M"), 'w')
    pickle.dump({'updates': updates, 'releases': releases,
                 'overrides': overrides}, dump)
    dump.close()
def get_widgets(self, release):
    """
    Return the metric widgets for a given release.

    If our metric widgets are more than a day old, recreate them with
    fresh metrics from our database.
    """
    if self.age and get_age_in_days(self.age) < 1:
        return self.widgets[release]
    log.debug("Generating some fresh metric widgets...")
    freshwidgets = {}
    for rel in Release.select():
        if not rel.metrics:
            log.warning("No metrics found for %s" % rel.name)
            return self.init_metrics(rel)
        # dict.has_key() is deprecated (and gone in Python 3); use `in`.
        if rel.name not in freshwidgets:
            freshwidgets[rel.name] = {}
        for metric in self.metrics:
            widget = metric.get_widget(rel.metrics[metric.__class__.__name__])
            if widget:
                freshwidgets[rel.name][metric.__class__.__name__] = widget
    self.widgets = freshwidgets
    self.age = datetime.utcnow()
    return self.widgets[release]
def refresh(self):
    """
    Refresh all of the metrics for all releases.

    For each release, initialize our metrics objects, and feed them
    every update for that release.  Do the necessary calculations, and
    then save our metrics to the database in the Release.metrics
    PickleCol.
    """
    log.info("Doing a hard refresh of our metrics data")
    all_updates = list(PackageUpdate.select())
    releases = list(Release.select())

    # Bucket every update by the name of its release.
    updates_by_release = {}
    for release in releases:
        updates_by_release[release.name] = []
    for update in all_updates:
        updates_by_release[update.release.name].append(update)

    metrics = {}
    for release in releases:
        log.debug("Calculating metrics for %s" % release.name)
        self.init_metrics(release)
        for update in updates_by_release[release.name]:
            for metric in self.metrics:
                metric.update(update)
        for metric in self.metrics:
            metric.done()
            metrics[metric.__class__.__name__] = metric.get_data()
        release.metrics = metrics
    hub.commit()
    del all_updates
    del releases
    log.info("Metrics generation complete!")
def get_widgets(self, release):
    """
    Return the metric widgets for a given release.

    If our metric widgets are more than a day old, recreate them with
    fresh metrics from our database.
    """
    if self.age and get_age_in_days(self.age) < 1:
        return self.widgets[release]
    log.debug("Generating some fresh metric widgets...")
    freshwidgets = {}
    for rel in Release.select():
        if not rel.metrics:
            log.warning("No metrics found for %s" % rel.name)
            return self.init_metrics(rel)
        # dict.has_key() is deprecated (removed in Python 3); use `in`.
        if rel.name not in freshwidgets:
            freshwidgets[rel.name] = {}
        for metric in self.metrics:
            widget = metric.get_widget(
                rel.metrics[metric.__class__.__name__])
            if widget:
                freshwidgets[rel.name][metric.__class__.__name__] = widget
    self.widgets = freshwidgets
    self.age = datetime.utcnow()
    return self.widgets[release]
def get_security_updates(self, release):
    """ Return security updates in testing with no pending request. """
    rel = Release.select(Release.q.long_name == release)[0]
    return PackageUpdate.select(
        AND(PackageUpdate.q.releaseID == rel.id,
            PackageUpdate.q.type == 'security',
            PackageUpdate.q.status == 'testing',
            PackageUpdate.q.request == None))
def _get_epel_release(self):
    """ Fetch the EL5 release, creating it if it does not exist yet. """
    matches = Release.select(Release.q.name == 'EL5')
    if matches.count():
        return matches[0]
    # Not in the database yet -- create it on the fly.
    return Release(name='EL5', long_name='Fedora EPEL 5',
                   id_prefix='FEDORA-EPEL', dist_tag='dist-5E-epel')
def get_security_updates(self, release):
    """
    Return testing security updates with no pending request, ordered by
    sort_by_days_in_testing.
    """
    rel = Release.select(Release.q.long_name == release)[0]
    pending = PackageUpdate.select(
        AND(PackageUpdate.q.releaseID == rel.id,
            PackageUpdate.q.type == 'security',
            PackageUpdate.q.status == 'testing',
            PackageUpdate.q.request == None))
    return self.sort_by_days_in_testing(pending)
def save_db():
    """ Dump all releases and updates to a timestamped pickle file. """
    # Snapshot each release and its metrics
    release_attrs = ('name', 'long_name', 'id_prefix', 'dist_tag',
                     'locked', 'metrics')
    releases = []
    for release in Release.select():
        releases.append(dict((attr, getattr(release, attr))
                             for attr in release_attrs))

    # Snapshot every update
    updates = []
    all_updates = PackageUpdate.select()
    progress = ProgressBar(maxValue=all_updates.count())
    for update in all_updates:
        rel = update.release
        bug_rows = []
        for bug in update.bugs:
            row = [bug.bz_id, bug.title, bug.security]
            if hasattr(bug, 'parent'):
                row.append(bug.parent)
            else:
                row.append(False)
            bug_rows.append(row)
        if hasattr(update, 'approved'):
            approved = update.approved
        else:
            approved = None
        data = {
            'title': update.title,
            'builds': [(b.package.name, b.nvr) for b in update.builds],
            'date_submitted': update.date_submitted,
            'date_pushed': update.date_pushed,
            'date_modified': update.date_modified,
            'release': [rel.name, rel.long_name, rel.id_prefix, rel.dist_tag],
            'submitter': update.submitter,
            # Older schemas call this field `updateid`
            'update_id': hasattr(update, 'update_id') and update.update_id
                         or update.updateid,
            'type': update.type,
            'karma': update.karma,
            'cves': [cve.cve_id for cve in update.cves],
            'bugs': bug_rows,
            'status': update.status,
            'pushed': update.pushed,
            'notes': update.notes,
            'request': update.request,
            'comments': [(c.timestamp, c.author, c.text, c.karma, c.anonymous)
                         for c in update.comments],
            'approved': approved,
        }
        updates.append(data)
        progress()

    dump = file('bodhi-pickledb-%s' % time.strftime("%y%m%d.%H%M"), 'w')
    pickle.dump({'updates': updates, 'releases': releases}, dump)
    dump.close()
def save_db():
    """ Serialize all releases and updates into a dated pickle file. """
    # Save each release and its metrics
    releases = []
    for release in Release.select():
        snapshot = {}
        for attr in ("name", "long_name", "id_prefix", "dist_tag",
                     "locked", "metrics"):
            snapshot[attr] = getattr(release, attr)
        releases.append(snapshot)

    updates = []
    all_updates = PackageUpdate.select()
    progress = ProgressBar(maxValue=all_updates.count())
    for update in all_updates:
        record = {}
        record["title"] = update.title
        record["builds"] = [(build.package.name, build.nvr)
                            for build in update.builds]
        record["date_submitted"] = update.date_submitted
        record["date_pushed"] = update.date_pushed
        record["date_modified"] = update.date_modified
        record["release"] = [update.release.name, update.release.long_name,
                             update.release.id_prefix,
                             update.release.dist_tag]
        record["submitter"] = update.submitter
        # Fall back to the legacy `updateid` attribute when needed.
        record["update_id"] = hasattr(update, "update_id") and \
            update.update_id or update.updateid
        record["type"] = update.type
        record["karma"] = update.karma
        record["cves"] = [cve.cve_id for cve in update.cves]
        record["bugs"] = []
        for bug in update.bugs:
            row = [bug.bz_id, bug.title, bug.security]
            if hasattr(bug, "parent"):
                row.append(bug.parent)
            else:
                row.append(False)
            record["bugs"].append(row)
        record["status"] = update.status
        record["pushed"] = update.pushed
        record["notes"] = update.notes
        record["request"] = update.request
        record["comments"] = [(c.timestamp, c.author, c.text, c.karma,
                               c.anonymous) for c in update.comments]
        if hasattr(update, "approved"):
            record["approved"] = update.approved
        else:
            record["approved"] = None
        updates.append(record)
        progress()

    dump = file("bodhi-pickledb-%s" % time.strftime("%y%m%d.%H%M"), "w")
    pickle.dump({"updates": updates, "releases": releases}, dump)
    dump.close()
def get_unapproved_critpath_updates(self, release):
    """
    Return unapproved critical path updates in testing for *release*,
    ordered by sort_by_days_in_testing.
    """
    rel = Release.select(Release.q.long_name == release)[0]
    pending = PackageUpdate.select(
        AND(PackageUpdate.q.releaseID == rel.id,
            PackageUpdate.q.status == 'testing',
            PackageUpdate.q.request == None),
        orderBy=PackageUpdate.q.date_submitted).reversed()
    updates = [u for u in pending if u.critpath and not u.critpath_approved]
    return self.sort_by_days_in_testing(updates)
def get_unapproved_critpath_updates(self, release):
    """
    Return unapproved critical path updates for *release*, considering
    every update that is neither stable nor obsolete and has no pending
    request, newest submissions first.
    """
    rel = Release.select(Release.q.long_name == release)[0]
    candidates = PackageUpdate.select(
        AND(PackageUpdate.q.releaseID == rel.id,
            PackageUpdate.q.status != 'stable',
            PackageUpdate.q.status != 'obsolete',
            PackageUpdate.q.request == None),
        orderBy=PackageUpdate.q.date_submitted).reversed()
    return [u for u in candidates if u.critpath and not u.critpath_approved]
def send_digest_mail(self): """ Send digest mail to mailing lists """ for prefix, content in self.testing_digest.items(): log.debug("Sending digest for updates-testing %s" % prefix) maildata = u"" try: security_updates = self.get_security_updates(prefix) if security_updates: maildata += u"The following %s Security updates need testing:\n Age URL\n" % prefix for update in security_updates: maildata += u" %3i %s%s\n" % ( update.days_in_testing, config.get("base_address"), url(update.get_url()), ) maildata += "\n\n" critpath_updates = self.get_unapproved_critpath_updates(prefix) if critpath_updates: maildata += u"The following %s Critical Path updates have yet to be approved:\n Age URL\n" % prefix for update in self.get_unapproved_critpath_updates(prefix): maildata += u" %3i %s%s\n" % ( update.days_in_testing, config.get("base_address"), url(update.get_url()), ) maildata += "\n\n" except Exception, e: log.exception(e) maildata += u"The following builds have been pushed to %s updates-testing\n\n" % prefix # get a list af all nvr's updlist = content.keys() # sort the list updlist.sort() # Add the list of builds to the mail for pkg in updlist: maildata += u" %s\n" % pkg # Add some space between the short list and the Details" maildata += u"\nDetails about builds:\n\n" # Add the detail of each build for nvr in updlist: maildata += u"\n" + self.testing_digest[prefix][nvr] release = Release.select(Release.q.long_name == prefix)[0] mail.send_mail( config.get("bodhi_email"), config.get("%s_test_announce_list" % release.id_prefix.lower().replace("-", "_")), "%s updates-testing report" % prefix, maildata, )
def _fetch_candidate_builds(self, pkg):
    """ Return all candidate builds for a given package """
    matches = {}
    koji = get_session()
    # Batch one getLatestBuilds call per candidate tag into a multicall.
    koji.multicall = True
    for tag in [r.candidate_tag for r in Release.select()]:
        koji.getLatestBuilds(tag, package=pkg)
    for result in koji.multiCall():
        for entries in result:
            for entry in entries:
                matches[entry['nvr']] = entry['completion_time']
    # Newest builds first.
    ordered = sorted(matches.items(), key=itemgetter(1), reverse=True)
    return [nvr for nvr, _completion in ordered]
def _fetch_candidate_builds(self, pkg):
    """ Return all candidate builds for a given package """
    matches = {}
    koji = get_session()
    # Batch one listTagged call per candidate tag into a single multicall.
    koji.multicall = True
    for tag in [r.candidate_tag for r in Release.select()]:
        koji.listTagged(tag, package=pkg)
    results = koji.multiCall()
    for result in results:
        for entries in result:
            for entry in entries:
                matches[entry['nvr']] = entry['completion_time']
    # Sort newest-completed first and keep only the nvrs.
    return [pair[0] for pair in
            sorted(matches.items(), key=itemgetter(1), reverse=True)]
def masher(self):
    """ Display the current status of the Masher """
    if not config.get('masher'):
        # No remote masher configured -- report on the local one.
        from bodhi.masher import masher
        tags = []
        for release in Release.select():
            tags.append(release.stable_tag)
            tags.append(release.testing_tag)
        return dict(masher_str=str(masher), tags=tags)
    data = self._masher_request('/admin/masher')
    if not data:
        data = {'masher_str': 'Unable to contact the masher', 'tags': []}
    return dict(masher_str=data['masher_str'], tags=data['tags'])
def masher(self):
    """ Display the current status of the Masher """
    if config.get('masher'):
        # A remote masher is configured; ask it for its status.
        data = self._masher_request('/admin/masher')
        if not data:
            data = {'masher_str': 'Unable to contact the masher', 'tags': []}
        return dict(masher_str=data['masher_str'], tags=data['tags'])
    from bodhi.masher import masher
    tags = []
    for release in Release.select():
        tags.extend([release.stable_tag, release.testing_tag])
    return dict(masher_str=str(masher), tags=tags)
def clean_stable_builds(untag=False): koji = get_session() for release in Release.select(): latest_stable_builds = koji.listTagged(release.stable_tag, latest=True) latest_stable_nvrs = [build['nvr'] for build in latest_stable_builds] print "Fetched %d latest stable builds tagged with %s" % ( len(latest_stable_builds), release.stable_tag) stable_builds = koji.listTagged(release.stable_tag) stable_nvrs = [build['nvr'] for build in stable_builds] print "Fetched %d stable builds tagged with %s" % ( len(stable_builds), release.stable_tag) for latest_build in latest_stable_builds: for build in stable_builds: if build['nvr'] == latest_build['nvr']: continue compare_builds(latest_build, build, untag, release.stable_tag)
def clean_stable_builds(untag=False): koji = get_session() for release in Release.select(): latest_stable_builds = koji.listTagged(release.stable_tag, latest=True) latest_stable_nvrs = [build['nvr'] for build in latest_stable_builds] print "Fetched %d latest stable builds tagged with %s" % ( len(latest_stable_builds), release.stable_tag) stable_builds = koji.listTagged(release.stable_tag) stable_nvrs = [build['nvr'] for build in stable_builds] print "Fetched %d stable builds tagged with %s" % (len(stable_builds), release.stable_tag) for latest_build in latest_stable_builds: for build in stable_builds: if build['nvr'] == latest_build['nvr']: continue compare_builds(latest_build, build, untag, release.stable_tag)
def main(): unstable = subprocess.Popen( 'grep "\[Fedora Update\] \[unstable\]" bodhi.logs', stdout=subprocess.PIPE, shell=True) out, err = unstable.communicate() (unstable_updates, unstable_critpath, unstable_deltas, unstable_accum, unstable_occur) = parse_output(out) stable = subprocess.Popen( 'grep "\[Fedora Update\] \[stablekarma\]" bodhi.logs', stdout=subprocess.PIPE, shell=True) out, err = stable.communicate() (stable_updates, stable_critpath, stable_deltas, stable_accum, stable_occur) = parse_output(out) for release in Release.select(): print '\n' + header(release.long_name) num_updates = PackageUpdate.select( PackageUpdate.q.releaseID == release.id).count() num_stable = len(stable_updates[release.name]) num_unstable = len(unstable_updates[release.name]) num_testing = len(unstable_deltas) + len(stable_deltas) print " * %d updates automatically unpushed due to karma (%0.2f%%)" % ( num_unstable, float(num_unstable) / num_updates * 100) print " * %d of which were critical path updates" % ( unstable_critpath[release.name]) print " * %d updates automatically pushed due to karma (%0.2f%%)" % ( num_stable, float(num_stable) / num_updates * 100) print " * %d of which were critical path updates" % ( stable_critpath[release.name]) print " * Time spent in testing of updates that were pushed by karma:" print " * mean = %d days" % (stable_accum.days / len(stable_deltas)) print " * median = %d days" % stable_deltas[len(stable_deltas) / 2].days print " * mode = %d days" % sorted(stable_occur.items(), key=itemgetter(1))[-1][0] print " * Time spent in testing of updates that were unpushed by karma:" print " * mean = %d days" % (unstable_accum.days / len(unstable_deltas)) print " * median = %d days" % unstable_deltas[len(unstable_deltas) / 2].days print " * mode = %d days" % sorted(unstable_occur.items(), key=itemgetter(1))[-1][0]
def clean_testing_builds(untag=False): koji = get_session() for release in Release.select(): stable_builds = koji.listTagged(release.stable_tag, latest=True) stable_nvrs = [build["nvr"] for build in stable_builds] print "Fetched %d builds tagged with %s" % (len(stable_builds), release.stable_tag) testing_builds = koji.listTagged(release.testing_tag, latest=True) print "Fetched %d builds tagged with %s" % (len(testing_builds), release.testing_tag) testing_nvrs = [build["nvr"] for build in testing_builds] for testing_build in testing_builds: for build in testing_builds: compare_builds(testing_build, build, untag, release.testing_tag) for build in stable_builds: compare_builds(testing_build, build, untag, release.testing_tag) # Find testing updates that aren't in the list of latest builds for update in PackageUpdate.select( AND( PackageUpdate.q.releaseID == release.id, PackageUpdate.q.status == "testing", PackageUpdate.q.request == None, ) ): for build in update.builds: if build.nvr not in testing_nvrs: latest_testing = None latest_stable = None for testing in testing_nvrs: if testing.startswith(build.package.name + "-"): latest_testing = testing break for stable in stable_nvrs: if stable.startswith(build.package.name + "-"): latest_stable = stable break if latest_testing: koji_build = koji.getBuild(build.nvr) latest_build = koji.getBuild(latest_testing) if rpm.labelCompare(build_evr(koji_build), build_evr(latest_build)) < 0: print "%s in testing, latest_testing = %s, latest_stable = %s" % ( update.title, latest_testing, latest_stable, ) if untag: print "Obsoleting %s" % update.title update.obsolete(newer=latest_testing)
def get_critpath_updates(self, release=None, unapproved=None):
    """
    Build a feed dict of the latest critical path updates.

    :param release: optional release name to restrict the feed to.
    :param unapproved: when truthy, exclude stable and approved updates.
    :returns: dict with title/subtitle/link/entries for the feed template.
    """
    # (Removed the unused `i` counter that was incremented but never read.)
    entries = []
    base = config.get('base_address')
    title = 'Latest Critical Path Updates'
    query = [PackageUpdate.q.status != 'obsolete']
    if release:
        try:
            release = Release.byName(release)
        except SQLObjectNotFound:
            return dict(title='%s release not found' % release, entries=[])
        releases = [release]
        title = title + ' for %s' % release.long_name
    else:
        releases = Release.select()
    if unapproved:
        query.append(PackageUpdate.q.status != 'stable')
    for update in PackageUpdate.select(
            AND(OR(*[PackageUpdate.q.releaseID == release.id
                     for release in releases]),
                *query),
            orderBy=PackageUpdate.q.date_submitted).reversed():
        delta = datetime.utcnow() - update.date_submitted
        # Stop once we are past the feed window and the feed is full.
        if delta and delta.days > config.get('feeds.num_days_to_show'):
            if len(entries) >= config.get('feeds.max_entries'):
                break
        if update.critpath:
            if unapproved:
                if update.critpath_approved:
                    continue
            entries.append({
                'id': base + url(update.get_url()),
                'summary': update.notes,
                'link': base + url(update.get_url()),
                'published': update.date_submitted,
                'updated': update.date_submitted,
                'title': update.title,
            })
    return dict(title=title, subtitle="",
                link=config.get('base_address') + url('/'),
                entries=entries)
def get_critpath_updates(self, release=None, unapproved=None):
    """
    Build a feed dict of the latest critical path updates, optionally
    restricted to one release and/or to unapproved updates only.
    """
    i = 0
    entries = []
    base = config.get('base_address')
    title = 'Latest Critical Path Updates'
    query = [PackageUpdate.q.status != 'obsolete']
    if release:
        try:
            release = Release.byName(release)
        except SQLObjectNotFound:
            return dict(title='%s release not found' % release, entries=[])
        releases = [release]
        title = title + ' for %s' % release.long_name
    else:
        releases = Release.select()
    if unapproved:
        query.append(PackageUpdate.q.status != 'stable')
    release_filter = OR(*[PackageUpdate.q.releaseID == release.id
                          for release in releases])
    candidates = PackageUpdate.select(
        AND(release_filter, *query),
        orderBy=PackageUpdate.q.date_submitted).reversed()
    for update in candidates:
        delta = datetime.utcnow() - update.date_submitted
        # Stop once the window has passed and the feed is already full.
        if delta and delta.days > config.get('feeds.num_days_to_show'):
            if len(entries) >= config.get('feeds.max_entries'):
                break
        if not update.critpath:
            continue
        if unapproved and update.critpath_approved:
            continue
        link = base + url(update.get_url())
        entries.append({
            'id': link,
            'summary': update.notes,
            'link': link,
            'published': update.date_submitted,
            'updated': update.date_submitted,
            'title': update.title,
        })
        i += 1
    return dict(title=title, subtitle="",
                link=config.get('base_address') + url('/'),
                entries=entries)
def send_digest_mail(self): ''' Send digest mail to mailing lists ''' for prefix, content in self.testing_digest.items(): log.debug("Sending digest for updates-testing %s" % prefix) maildata = u'' try: security_updates = self.get_security_updates(prefix) if security_updates: maildata += u'The following %s Security updates need testing:\n Age URL\n' % prefix for update in security_updates: maildata += u' %3i %s%s\n' % ( update.days_in_testing, config.get('base_address'), url(update.get_url())) maildata += '\n\n' critpath_updates = self.get_unapproved_critpath_updates(prefix) if critpath_updates: maildata += u'The following %s Critical Path updates have yet to be approved:\n Age URL\n' % prefix for update in self.get_unapproved_critpath_updates(prefix): maildata += u' %3i %s%s\n' % ( update.days_in_testing, config.get('base_address'), url(update.get_url())) maildata += '\n\n' except Exception, e: log.exception(e) maildata += u'The following builds have been pushed to %s updates-testing\n\n' % prefix # get a list af all nvr's updlist = content.keys() # sort the list updlist.sort() # Add the list of builds to the mail for pkg in updlist: maildata += u' %s\n' % pkg # Add some space between the short list and the Details" maildata += u'\nDetails about builds:\n\n' # Add the detail of each build for nvr in updlist: maildata += u"\n" + self.testing_digest[prefix][nvr] release = Release.select(Release.q.long_name == prefix)[0] mail.send_mail( config.get('bodhi_email'), config.get('%s_test_announce_list' % release.id_prefix.lower().replace('-', '_')), '%s updates-testing report' % prefix, maildata)
def clean_testing_builds(untag=False): koji = get_session() for release in Release.select(): stable_builds = koji.listTagged(release.stable_tag, latest=True) stable_nvrs = [build['nvr'] for build in stable_builds] print "Fetched %d builds tagged with %s" % (len(stable_builds), release.stable_tag) testing_builds = koji.listTagged(release.testing_tag, latest=True) print "Fetched %d builds tagged with %s" % (len(testing_builds), release.testing_tag) testing_nvrs = [build['nvr'] for build in testing_builds] for testing_build in testing_builds: for build in testing_builds: compare_builds(testing_build, build, untag, release.testing_tag) for build in stable_builds: compare_builds(testing_build, build, untag, release.testing_tag) # Find testing updates that aren't in the list of latest builds for update in PackageUpdate.select( AND(PackageUpdate.q.releaseID == release.id, PackageUpdate.q.status == 'testing', PackageUpdate.q.request == None)): for build in update.builds: if build.nvr not in testing_nvrs: latest_testing = None latest_stable = None for testing in testing_nvrs: if testing.startswith(build.package.name + '-'): latest_testing = testing break for stable in stable_nvrs: if stable.startswith(build.package.name + '-'): latest_stable = stable break if latest_testing: koji_build = koji.getBuild(build.nvr) latest_build = koji.getBuild(latest_testing) if rpm.labelCompare(build_evr(koji_build), build_evr(latest_build)) < 0: print "%s in testing, latest_testing = %s, latest_stable = %s" % ( update.title, latest_testing, latest_stable) if untag: print "Obsoleting %s" % update.title update.obsolete(newer=latest_testing)
def send_digest_mail(self): ''' Send digest mail to mailing lists ''' for prefix, content in self.testing_digest.items(): log.debug("Sending digest for updates-testing %s" % prefix) maildata = u'' try: security_updates = self.get_security_updates(prefix) if security_updates: maildata += u'The following %s Security updates need testing:\n\n' % prefix for update in security_updates: maildata += u' %s\n' % (config.get('base_address') + url(update.get_url())) maildata += '\n\n' critpath_updates = self.get_unapproved_critpath_updates(prefix) if critpath_updates: maildata += u'The following %s Critical Path updates have yet to be approved:\n\n' % prefix for update in self.get_unapproved_critpath_updates(prefix): maildata += u' %s\n' % (config.get('base_address') + url(update.get_url())) maildata += '\n\n' except Exception, e: log.exception(e) maildata += u'The following builds have been pushed to %s updates-testing\n\n' % prefix # get a list af all nvr's updlist = content.keys() # sort the list updlist.sort() # Add the list of builds to the mail for pkg in updlist: maildata += u' %s\n' % pkg # Add some space between the short list and the Details" maildata += u'\nDetails about builds:\n\n' # Add the detail of each build for nvr in updlist: maildata += u"\n" + self.testing_digest[prefix][nvr] release = Release.select(Release.q.long_name==prefix)[0] mail.send_mail(config.get('bodhi_email'), config.get('%s_test_announce_list' % release.id_prefix.lower().replace('-', '_')), '%s updates-testing report' % prefix, maildata)
def main(): unstable = subprocess.Popen('grep "\[Fedora Update\] \[unstable\]" bodhi.logs', stdout=subprocess.PIPE, shell=True) out, err = unstable.communicate() (unstable_updates, unstable_critpath, unstable_deltas, unstable_accum, unstable_occur) = parse_output(out) stable = subprocess.Popen('grep "\[Fedora Update\] \[stablekarma\]" bodhi.logs', stdout=subprocess.PIPE, shell=True) out, err = stable.communicate() (stable_updates, stable_critpath, stable_deltas, stable_accum, stable_occur) = parse_output(out) for release in Release.select(): print '\n' + header(release.long_name) num_updates = PackageUpdate.select( PackageUpdate.q.releaseID==release.id).count() num_stable = len(stable_updates[release.name]) num_unstable = len(unstable_updates[release.name]) num_testing = len(unstable_deltas) + len(stable_deltas) print " * %d updates automatically unpushed due to karma (%0.2f%%)" % ( num_unstable, float(num_unstable) / num_updates * 100) print " * %d of which were critical path updates" % ( unstable_critpath[release.name]) print " * %d updates automatically pushed due to karma (%0.2f%%)" % ( num_stable, float(num_stable) / num_updates * 100) print " * %d of which were critical path updates" % ( stable_critpath[release.name]) print " * Time spent in testing of updates that were pushed by karma:" print " * mean = %d days" % (stable_accum.days / len(stable_deltas)) print " * median = %d days" % stable_deltas[len(stable_deltas)/2].days print " * mode = %d days" % sorted(stable_occur.items(), key=itemgetter(1))[-1][0] print " * Time spent in testing of updates that were unpushed by karma:" print " * mean = %d days" % (unstable_accum.days / len(unstable_deltas)) print " * median = %d days" % unstable_deltas[len(unstable_deltas)/2].days print " * mode = %d days" % sorted(unstable_occur.items(), key=itemgetter(1))[-1][0]
def clean_pending_tags():
    """ Clean up any stray pending tags """
    koji = get_session()
    for release in Release.select():
        log.info("Finding all stray pending-testing builds...")
        # Match the 'EL' prefix used by the other clean_pending_tags
        # variants; a bare 'E' would also skip any non-EPEL release whose
        # name happens to start with E.
        if release.name.startswith('EL'):
            continue
        tag = release.pending_testing_tag
        tagged = [build['nvr'] for build in koji.listTagged(tag)]
        for nvr in tagged:
            try:
                build = PackageBuild.byNvr(nvr)
                for update in build.updates:
                    # Past pending-testing: the pending tag is stale.
                    if update.status in ('testing', 'stable', 'obsolete'):
                        log.info("%s %s" % (nvr, update.status))
                        log.info("Untagging %s" % nvr)
                        koji.untagBuild(tag, nvr, force=True)
            except SQLObjectNotFound:
                log.info("Can't find build for %s" % nvr)
                log.info("Untagging %s" % nvr)
                koji.untagBuild(tag, nvr, force=True)
        log.info("Finding all stray pending-stable builds...")
        tag = release.pending_stable_tag
        tagged = [build['nvr'] for build in koji.listTagged(tag)]
        for nvr in tagged:
            try:
                build = PackageBuild.byNvr(nvr)
                for update in build.updates:
                    # Not on its way to stable: the pending tag is stale.
                    if update.status in ('pending', 'obsolete', 'stable'):
                        log.info("%s %s" % (nvr, update.status))
                        log.info("Untagging %s" % nvr)
                        koji.untagBuild(tag, nvr, force=True)
            except SQLObjectNotFound:
                log.info("Untagging %s" % nvr)
                koji.untagBuild(tag, nvr, force=True)
def clean_pending_tags():
    """ Clean up any stray pending tags """
    koji = get_session()
    for release in Release.select():
        log.info("Finding all stray pending-testing builds...")
        if release.name.startswith('EL'):
            continue

        pending_testing = release.pending_testing_tag
        for nvr in [b['nvr'] for b in koji.listTagged(pending_testing)]:
            try:
                build = PackageBuild.byNvr(nvr)
                for update in build.updates:
                    if update.status in ('testing', 'stable', 'obsolete'):
                        log.info("%s %s" % (nvr, update.status))
                        log.info("Untagging %s" % nvr)
                        koji.untagBuild(pending_testing, nvr, force=True)
            except SQLObjectNotFound:
                # Build unknown to bodhi -- drop its pending tag too.
                log.info("Can't find build for %s" % nvr)
                log.info("Untagging %s" % nvr)
                koji.untagBuild(pending_testing, nvr, force=True)

        log.info("Finding all stray pending-stable builds...")
        pending_stable = release.pending_stable_tag
        for nvr in [b['nvr'] for b in koji.listTagged(pending_stable)]:
            try:
                build = PackageBuild.byNvr(nvr)
                for update in build.updates:
                    if update.status in ('pending', 'obsolete', 'stable'):
                        log.info("%s %s" % (nvr, update.status))
                        log.info("Untagging %s" % nvr)
                        koji.untagBuild(pending_stable, nvr, force=True)
            except SQLObjectNotFound:
                log.info("Untagging %s" % nvr)
                koji.untagBuild(pending_stable, nvr, force=True)
def main():
    """Audit bodhi's database against koji's tag state and report mismatches.

    Read-only by default; pass '--fix' on the command line to actually
    untag stray pending builds and queue tag moves for mis-tagged updates.
    """
    load_config()
    # Establish the SQLObject connection for this process.
    __connection__ = hub = PackageHub("bodhi")
    koji = get_session()
    tasks = []  # NOTE(review): never used below -- confirm it can be removed
    # Set of (current_tag, destination_tag, nvr) moves to perform at the end.
    broke = set()

    # Clean up any stray pending tags (skipping EPEL releases).
    for release in Release.select():
        print "Finding all pending-testing builds..."
        if release.name.startswith('EL'):
            continue
        tag = release.pending_testing_tag
        tagged = [build['nvr'] for build in koji.listTagged(tag)]
        for nvr in tagged:
            try:
                build = PackageBuild.byNvr(nvr)
                for update in build.updates:
                    # Past pending-testing: this tag is stale.
                    if update.status in ('testing', 'stable', 'obsolete'):
                        print "%s %s" % (nvr, update.status)
                        if '--fix' in sys.argv:
                            print "Untagging %s" % nvr
                            koji.untagBuild(tag, nvr, force=True)
            except SQLObjectNotFound:
                # Tagged in koji, unknown to bodhi.
                print "Can't find build for %s" % nvr
                if '--fix' in sys.argv:
                    print "Untagging %s" % nvr
                    koji.untagBuild(tag, nvr, force=True)
        tag = release.pending_stable_tag
        tagged = [build['nvr'] for build in koji.listTagged(tag)]
        for nvr in tagged:
            try:
                build = PackageBuild.byNvr(nvr)
                for update in build.updates:
                    # These states should not carry the pending-stable tag.
                    if update.status in ('pending', 'obsolete', 'stable'):
                        print "%s %s" % (nvr, update.status)
                        if '--fix' in sys.argv:
                            print "Untagging %s" % nvr
                            koji.untagBuild(tag, nvr, force=True)
            except SQLObjectNotFound:
                print "Can't find build for %s" % nvr
                if '--fix' in sys.argv:
                    print "Untagging %s" % nvr
                    koji.untagBuild(tag, nvr, force=True)

    # Check for testing updates that aren't tagged properly.
    for update in PackageUpdate.select(PackageUpdate.q.status == 'testing'):
        dest_tag = update.release.testing_tag
        for build in update.builds:
            tags = [tag['name'] for tag in koji.listTags(build=build.nvr)]
            if dest_tag not in tags:
                print "%s marked as testing, but tagged with %s" % (build.nvr, tags)
                if '--fix' in sys.argv:
                    # Move from whatever tag it currently has to testing.
                    broke.add((tags[0], dest_tag, build.nvr))

    # Check all candidate updates to see if they are in a different bodhi state.
    for release in Release.select():
        tag = release.candidate_tag
        tagged = [build['nvr'] for build in koji.listTagged(tag, latest=True)]
        for nvr in tagged:
            try:
                build = PackageBuild.byNvr(nvr)
                for update in build.updates:
                    if update.status in ('testing', 'stable'):
                        print "%s %s but tagged as %s" % (nvr, update.status, tag)
                        if '--fix' in sys.argv:
                            # Pick the destination tag matching the bodhi state.
                            dest = release.testing_tag
                            if update.status == 'stable':
                                dest = release.stable_tag
                            elif update.status == 'obsolete':
                                dest = release.candidate_tag
                            broke.add((tag, dest, nvr))
            except SQLObjectNotFound:
                # Candidate builds without a bodhi record are fine here.
                pass

    # Make sure that all builds in koji tagged as an update exist
    # in bodhi, and are in the expect state.
    for release in Release.select():
        for tag in (release.testing_tag, release.stable_tag):
            tagged = [build['nvr'] for build in koji.listTagged(tag, latest=True)]
            for nvr in tagged:
                try:
                    build = PackageBuild.byNvr(nvr)
                except SQLObjectNotFound:
                    print "PackageUpdate(%s) not found!" % nvr
                    continue
                if not len(build.updates):
                    print "PackageBuild(%s) has no updates" % (build.nvr)
                # Old-style conditional: 'testing' if the tag name contains
                # 'testing', otherwise 'stable'.
                status = 'testing' in tag and 'testing' or 'stable'
                for update in build.updates:
                    if update.status != status:
                        print "%s is %s in bodhi but tagged as %s in koji" % (
                            update.title, update.status, tag)
                        if '--fix' in sys.argv:
                            dest = release.testing_tag
                            if update.status == 'stable':
                                dest = release.stable_tag
                            elif update.status == 'obsolete':
                                dest = release.candidate_tag
                            for b in update.builds:
                                broke.add((tag, dest, b.nvr))

    # Apply all queued tag moves in a single koji multicall.
    if broke:
        print " ** Fixing broken tags! **"
        koji.multicall = True
        for tag, dest, build in broke:
            print "Moving %s from %s to %s" % (build, tag, dest)
            koji.moveBuild(tag, dest, build, force=True)
        print "Running koji.multiCall()"
        results = koji.multiCall()
        success = False
        print "Waiting for tasks"
        bad_tasks = wait_for_tasks([task[0] for task in results])
        if bad_tasks == 0:
            success = True
        if success:
            print "Tags successfully moved!"
        else:
            print "Error moving tags!"
            print "bad_tasks = %r" % bad_tasks
# Validate each submitted build for a buildroot override: it must be
# resolvable in koji and tagged with some release's candidate/testing tag
# (never the stable tag).  On any failure we flash an error and redirect
# back to the override form (empty dict for the JSON API).
for build in iterate(builds):
    release = None
    # NOTE(review): n, v, r are not used within this span -- presumably
    # consumed by code that follows; confirm before removing.
    n, v, r = get_nvr(build)

    # Make sure the build is tagged correctly
    try:
        tags = [tag['name'] for tag in koji.listTags(build)]
    except Exception, e:
        flash(str(e))
        if request_format() == 'json':
            return dict()
        raise redirect('/override/new')

    # Determine the release by the tag, and sanity check the builds
    for tag in tags:
        for rel in Release.select():
            if tag in (rel.candidate_tag, rel.testing_tag):
                # NOTE(review): last_release appears to carry state to code
                # outside this span -- confirm against the full method.
                release = last_release = rel
            elif tag == rel.stable_tag:
                # A build already in stable cannot be overridden.
                flash('Error: %s is already tagged with %s' % (build, tag))
                if request_format() == 'json':
                    return dict()
                raise redirect('/override/new')
    if not release:
        flash(
            'Error: Could not determine release for %s with tags %s' %
            (build, map(str, tags)))
        if request_format() == 'json':
            return dict()
        raise redirect('/override/new')
def main():
    """Print per-release and overall update/karma metrics to stdout.

    Iterates every Release, aggregates statistics about its updates
    (feedback, karma, critpath, proventesters, time-in-testing) into a
    per-release dict, prints a report per release, then a global summary.
    """
    load_config()
    stats = {}  # {release: {'stat': ...}}
    feedback = 0  # total number of updates that received feedback
    karma = defaultdict(int)  # {username: # of karma submissions}
    num_updates = PackageUpdate.select().count()
    # NOTE(review): 'proventesters' is never used in this function; the
    # per-release data['proventesters'] set is what gets populated.
    proventesters = set()
    for release in Release.select():
        print header(release.long_name)
        updates = PackageUpdate.select(PackageUpdate.q.releaseID==release.id)
        # Per-release accumulator; see the report section below for how
        # each counter is consumed.
        stats[release.name] = {
            'num_updates': updates.count(),
            'num_tested': 0,
            'num_tested_without_karma': 0,
            'num_feedback': 0,
            'num_anon_feedback': 0,
            'num_critpath': 0,
            'num_critpath_approved': 0,
            'num_critpath_unapproved': 0,
            'num_stablekarma': 0,
            'num_testingtime': 0,
            'critpath_without_karma': set(),
            'conflicted_proventesters': [],
            'critpath_positive_karma_including_proventesters': [],
            'critpath_positive_karma_negative_proventesters': [],
            'stable_with_negative_karma': PackageUpdate.select(
                AND(PackageUpdate.q.releaseID==release.id,
                    PackageUpdate.q.status=='stable',
                    PackageUpdate.q.karma < 0)).count(),
            'bugs': set(),
            'karma': defaultdict(int),
            'deltas': [],  # testing->stable timedeltas
            'occurrences': {},  # {days in testing: count}
            'accumulative': timedelta(),
            'packages': defaultdict(int),
            'proventesters': set(),
            'proventesters_1': 0,
            'proventesters_0': 0,
            'proventesters_-1': 0,
            # for tracking number of types of karma
            '1': 0,
            '0': 0,
            '-1': 0,
        }
        data = stats[release.name]
        for status in statuses:
            data['num_%s' % status] = PackageUpdate.select(AND(
                PackageUpdate.q.releaseID==release.id,
                PackageUpdate.q.status==status)).count()
        for type in types:
            data['num_%s' % type] = PackageUpdate.select(AND(
                PackageUpdate.q.releaseID==release.id,
                PackageUpdate.q.type==type)).count()
        for update in release.updates:
            for build in update.builds:
                data['packages'][build.package] += 1
            for bug in update.bugs:
                data['bugs'].add(bug.bz_id)
            feedback_done = False
            testingtime_done = False
            for comment in update.comments:
                # Ignore automated QA commenters entirely.
                if comment.author_name in ('autoqa', 'taskotron'):
                    continue
                # Track the # of +1's, -1's, and +0's.
                if comment.author_name != 'bodhi':
                    data[str(comment.karma)] += 1
                    if comment.author_group == 'proventesters':
                        data['proventesters'].add(comment.author_name)
                        data['proventesters_%d' % comment.karma] += 1
                # Bodhi's own autokarma / time-in-testing notices.
                if comment.text == 'This update has reached the stable karma threshold and will be pushed to the stable updates repository':
                    data['num_stablekarma'] += 1
                elif comment.text and comment.text.endswith('days in testing and can be pushed to stable now if the maintainer wishes'):
                    data['num_testingtime'] += 1
                # For figuring out if an update has received feedback or not
                if not feedback_done:
                    if (not comment.author.startswith('bodhi') and
                            comment.karma != 0 and not comment.anonymous):
                        data['num_feedback'] += 1  # per-release tracking of feedback
                        feedback += 1  # total number of updates that have received feedback
                        feedback_done = True  # so we don't run this for each comment
                # Tracking per-author karma & anonymous feedback
                if not comment.author.startswith('bodhi'):
                    if comment.anonymous:
                        # @@: should we track anon +0 comments as "feedback"?
                        if comment.karma != 0:
                            data['num_anon_feedback'] += 1
                    else:
                        author = comment.author_name
                        data['karma'][author] += 1
                        karma[author] += 1
                # Time from the push-to-testing comment to the
                # push-to-stable comment (only counted once per update).
                if (not testingtime_done and
                        comment.text == 'This update has been pushed to testing'):
                    for othercomment in update.comments:
                        if othercomment.text == 'This update has been pushed to stable':
                            delta = othercomment.timestamp - comment.timestamp
                            data['deltas'].append(delta)
                            data['occurrences'][delta.days] = data['occurrences'].setdefault(delta.days, 0) + 1
                            data['accumulative'] += delta
                            testingtime_done = True
                            break
            if update.critpath:
                if update.critpath_approved or update.status == 'stable':
                    data['num_critpath_approved'] += 1
                else:
                    # NOTE(review): 'status' here is the leftover loop
                    # variable from 'for status in statuses' above (always
                    # the last element of statuses), not this update's
                    # status -- looks like it should be 'update.status';
                    # confirm before relying on num_critpath_unapproved.
                    if status in ('testing', 'pending'):
                        data['num_critpath_unapproved'] += 1
                data['num_critpath'] += 1
                #if not feedback_done:
                if update.status == 'stable' and update.karma == 0:
                    data['critpath_without_karma'].add(update)
                # Proventester metrics
                proventester_karma = defaultdict(int)  # {username: karma}
                positive_proventesters = 0
                negative_proventesters = 0
                for comment in update.comments:
                    if comment.author_group == 'proventesters':
                        proventester_karma[comment.author_name] += comment.karma
                for _karma in proventester_karma.values():
                    if _karma > 0:
                        positive_proventesters += 1
                    elif _karma < 0:
                        negative_proventesters += 1
                # Conflicting proventesters
                if positive_proventesters and negative_proventesters:
                    data['conflicted_proventesters'] += [short_url(update)]
                # Track updates with overall positive karma, including positive
                # karma from a proventester
                if update.karma > 0 and positive_proventesters:
                    data['critpath_positive_karma_including_proventesters'] += [short_url(update)]
                # Track updates with overall positive karma, including negative
                # karma from a proventester
                if update.karma > 0 and negative_proventesters:
                    data['critpath_positive_karma_negative_proventesters'] += [short_url(update)]
            if testingtime_done:
                data['num_tested'] += 1
                if not feedback_done:
                    data['num_tested_without_karma'] += 1
        data['deltas'].sort()
        # ---- per-release report ------------------------------------------
        print " * %d updates" % data['num_updates']
        print " * %d packages updated" % (len(data['packages']))
        for status in statuses:
            print " * %d %s updates" % (data['num_%s' % status], status)
        for type in types:
            print " * %d %s updates (%0.2f%%)" % (data['num_%s' % type], type,
                float(data['num_%s' % type]) / data['num_updates'] * 100)
        print " * %d bugs resolved" % len(data['bugs'])
        print " * %d critical path updates (%0.2f%%)" % (data['num_critpath'],
            float(data['num_critpath']) / data['num_updates'] * 100)
        print " * %d approved critical path updates" % (
            data['num_critpath_approved'])
        print " * %d unapproved critical path updates" % (
            data['num_critpath_unapproved'])
        print " * %d updates received feedback (%0.2f%%)" % (
            data['num_feedback'],
            (float(data['num_feedback']) / data['num_updates'] * 100))
        print " * %d +0 comments" % data['0']
        print " * %d +1 comments" % data['1']
        print " * %d -1 comments" % data['-1']
        print " * %d unique authenticated karma submitters" % (
            len(data['karma']))
        print " * %d proventesters" % len(data['proventesters'])
        print " * %d +1's from proventesters" % data['proventesters_1']
        print " * %d -1's from proventesters" % data['proventesters_-1']
        if data['num_critpath']:
            print " * %d critpath updates with conflicting proventesters (%0.2f%% of critpath)" % (
                len(data['conflicted_proventesters']),
                float(len(data['conflicted_proventesters'])) / data['num_critpath'] * 100)
            for u in data['conflicted_proventesters']:
                print " * " + u
            print " * %d critpath updates with positive karma and negative proventester feedback (%0.2f%% of critpath)" % (
                len(data['critpath_positive_karma_negative_proventesters']),
                float(len(data['critpath_positive_karma_negative_proventesters'])) / data['num_critpath'] * 100)
            for u in data['critpath_positive_karma_negative_proventesters']:
                print " * " + u
            print " * %d critpath updates with positive karma and positive proventester feedback (%0.2f%% of critpath)" % (
                len(data['critpath_positive_karma_including_proventesters']),
                float(len(data['critpath_positive_karma_including_proventesters'])) / data['num_critpath'] * 100)
        # NOTE(review): the divisions below can raise ZeroDivisionError for
        # a release with no stable/tested updates or no anonymous+authenticated
        # feedback -- confirm whether such releases can reach this point.
        print " * %d anonymous users gave feedback (%0.2f%%)" % (
            data['num_anon_feedback'],
            float(data['num_anon_feedback']) /
            (data['num_anon_feedback'] + sum(data['karma'].values())) * 100)
        # This does not take into account updates that reach stablekarma before being pushed to testing!
        # print " * %d out of %d stable updates went through testing (%0.2f%%)" %(
        #         data['num_tested'], data['num_stable'],
        #         float(data['num_tested']) / data['num_stable'] * 100)
        print " * %d updates reached the stable karma threshold (%0.2f%%)" % (
            data['num_stablekarma'],
            float(data['num_stablekarma']) / data['num_stable'] * 100)
        print " * %d updates reached the minimum time in testing threshold (%0.2f%%)" % (
            data['num_testingtime'],
            float(data['num_testingtime']) / data['num_stable'] * 100)
        print " * %d went from testing to stable *without* karma (%0.2f%%)" % (
            data['num_tested_without_karma'],
            float(data['num_tested_without_karma']) / data['num_tested'] * 100)
        print " * %d updates were pushed to stable with negative karma (%0.2f%%)" % (
            data['stable_with_negative_karma'],
            float(data['stable_with_negative_karma']) / data['num_stable'] * 100)
        print " * %d critical path updates pushed to stable *without* karma" % (
            len(data['critpath_without_karma']))
        #for update in data['critpath_without_karma']:
        #    print " * %s submitted by %s" % (update.title, update.submitter)
        # Python 2 integer division: mean/median are truncated to whole days.
        print " * Time spent in testing:"
        print " * mean = %d days" % (data['accumulative'].days / len(data['deltas']))
        print " * median = %d days" % (
            data['deltas'][len(data['deltas']) / 2].days)
        print " * mode = %d days" % (
            sorted(data['occurrences'].items(), key=itemgetter(1))[-1][0])
        #for package in sorted(data['packages'].items(), key=itemgetter(1), reverse=True):
        #    print " * %s: %d" % (package[0].name, package[1])
        print
    print
    # ---- global summary --------------------------------------------------
    print "Out of %d total updates, %d received feedback (%0.2f%%)" % (
        num_updates, feedback, (float(feedback) / num_updates * 100))
    print "Out of %d total unique commenters, the top 50 are:" % (
        len(karma))
    for submitter in sorted(karma.iteritems(), key=itemgetter(1), reverse=True)[:50]:
        print " * %s (%d)" % (submitter[0], submitter[1])
def main(): load_config() stats = {} # {release: {'stat': ...}} feedback = 0 # total number of updates that received feedback karma = defaultdict(int) # {username: # of karma submissions} num_updates = PackageUpdate.select().count() proventesters = set() for release in Release.select(): print header(release.long_name) updates = PackageUpdate.select(PackageUpdate.q.releaseID == release.id) stats[release.name] = { 'num_updates': updates.count(), 'num_tested': 0, 'num_tested_without_karma': 0, 'num_feedback': 0, 'num_anon_feedback': 0, 'num_critpath': 0, 'num_critpath_approved': 0, 'num_critpath_unapproved': 0, 'num_stablekarma': 0, 'num_testingtime': 0, 'critpath_without_karma': set(), 'conflicted_proventesters': [], 'critpath_positive_karma_including_proventesters': [], 'critpath_positive_karma_negative_proventesters': [], 'stable_with_negative_karma': PackageUpdate.select( AND(PackageUpdate.q.releaseID == release.id, PackageUpdate.q.status == 'stable', PackageUpdate.q.karma < 0)).count(), 'bugs': set(), 'karma': defaultdict(int), 'deltas': [], 'occurrences': {}, 'accumulative': timedelta(), 'packages': defaultdict(int), 'proventesters': set(), 'proventesters_1': 0, 'proventesters_0': 0, 'proventesters_-1': 0, # for tracking number of types of karma '1': 0, '0': 0, '-1': 0, } data = stats[release.name] for status in statuses: data['num_%s' % status] = PackageUpdate.select( AND(PackageUpdate.q.releaseID == release.id, PackageUpdate.q.status == status)).count() for type in types: data['num_%s' % type] = PackageUpdate.select( AND(PackageUpdate.q.releaseID == release.id, PackageUpdate.q.type == type)).count() for update in release.updates: for build in update.builds: data['packages'][build.package] += 1 for bug in update.bugs: data['bugs'].add(bug.bz_id) feedback_done = False testingtime_done = False for comment in update.comments: if comment.author_name in ('autoqa', 'taskotron'): continue # Track the # of +1's, -1's, and +0's. 
if comment.author_name != 'bodhi': data[str(comment.karma)] += 1 if comment.author_group == 'proventesters': data['proventesters'].add(comment.author_name) data['proventesters_%d' % comment.karma] += 1 if comment.text == 'This update has reached the stable karma threshold and will be pushed to the stable updates repository': data['num_stablekarma'] += 1 elif comment.text and comment.text.endswith( 'days in testing and can be pushed to stable now if the maintainer wishes' ): data['num_testingtime'] += 1 # For figuring out if an update has received feedback or not if not feedback_done: if (not comment.author.startswith('bodhi') and comment.karma != 0 and not comment.anonymous): data[ 'num_feedback'] += 1 # per-release tracking of feedback feedback += 1 # total number of updates that have received feedback feedback_done = True # so we don't run this for each comment # Tracking per-author karma & anonymous feedback if not comment.author.startswith('bodhi'): if comment.anonymous: # @@: should we track anon +0 comments as "feedback"? 
if comment.karma != 0: data['num_anon_feedback'] += 1 else: author = comment.author_name data['karma'][author] += 1 karma[author] += 1 if (not testingtime_done and comment.text == 'This update has been pushed to testing'): for othercomment in update.comments: if othercomment.text == 'This update has been pushed to stable': delta = othercomment.timestamp - comment.timestamp data['deltas'].append(delta) data['occurrences'][delta.days] = \ data['occurrences'].setdefault( delta.days, 0) + 1 data['accumulative'] += delta testingtime_done = True break if update.critpath: if update.critpath_approved or update.status == 'stable': data['num_critpath_approved'] += 1 else: if status in ('testing', 'pending'): data['num_critpath_unapproved'] += 1 data['num_critpath'] += 1 #if not feedback_done: if update.status == 'stable' and update.karma == 0: data['critpath_without_karma'].add(update) # Proventester metrics proventester_karma = defaultdict(int) # {username: karma} positive_proventesters = 0 negative_proventesters = 0 for comment in update.comments: if comment.author_group == 'proventesters': proventester_karma[ comment.author_name] += comment.karma for _karma in proventester_karma.values(): if _karma > 0: positive_proventesters += 1 elif _karma < 0: negative_proventesters += 1 # Conflicting proventesters if positive_proventesters and negative_proventesters: data['conflicted_proventesters'] += [short_url(update)] # Track updates with overall positive karma, including positive # karma from a proventester if update.karma > 0 and positive_proventesters: data[ 'critpath_positive_karma_including_proventesters'] += [ short_url(update) ] # Track updates with overall positive karma, including negative # karma from a proventester if update.karma > 0 and negative_proventesters: data['critpath_positive_karma_negative_proventesters'] += [ short_url(update) ] if testingtime_done: data['num_tested'] += 1 if not feedback_done: data['num_tested_without_karma'] += 1 data['deltas'].sort() 
print " * %d updates" % data['num_updates'] print " * %d packages updated" % (len(data['packages'])) for status in statuses: print " * %d %s updates" % (data['num_%s' % status], status) for type in types: print " * %d %s updates (%0.2f%%)" % ( data['num_%s' % type], type, float(data['num_%s' % type]) / data['num_updates'] * 100) print " * %d bugs resolved" % len(data['bugs']) print " * %d critical path updates (%0.2f%%)" % ( data['num_critpath'], float(data['num_critpath']) / data['num_updates'] * 100) print " * %d approved critical path updates" % ( data['num_critpath_approved']) print " * %d unapproved critical path updates" % ( data['num_critpath_unapproved']) print " * %d updates received feedback (%0.2f%%)" % ( data['num_feedback'], (float(data['num_feedback']) / data['num_updates'] * 100)) print " * %d +0 comments" % data['0'] print " * %d +1 comments" % data['1'] print " * %d -1 comments" % data['-1'] print " * %d unique authenticated karma submitters" % (len( data['karma'])) print " * %d proventesters" % len(data['proventesters']) print " * %d +1's from proventesters" % data['proventesters_1'] print " * %d -1's from proventesters" % data['proventesters_-1'] if data['num_critpath']: print " * %d critpath updates with conflicting proventesters (%0.2f%% of critpath)" % ( len(data['conflicted_proventesters']), float(len(data['conflicted_proventesters'])) / data['num_critpath'] * 100) for u in data['conflicted_proventesters']: print " * " + u print " * %d critpath updates with positive karma and negative proventester feedback (%0.2f%% of critpath)" % ( len(data['critpath_positive_karma_negative_proventesters']), float( len(data['critpath_positive_karma_negative_proventesters']) ) / data['num_critpath'] * 100) for u in data['critpath_positive_karma_negative_proventesters']: print " * " + u print " * %d critpath updates with positive karma and positive proventester feedback (%0.2f%% of critpath)" % ( len(data['critpath_positive_karma_including_proventesters']), 
float( len(data['critpath_positive_karma_including_proventesters'] )) / data['num_critpath'] * 100) print " * %d anonymous users gave feedback (%0.2f%%)" % ( data['num_anon_feedback'], float(data['num_anon_feedback']) / (data['num_anon_feedback'] + sum(data['karma'].values())) * 100) # This does not take into account updates that reach stablekarma before being pushed to testing! # print " * %d out of %d stable updates went through testing (%0.2f%%)" %( # data['num_tested'], data['num_stable'], # float(data['num_tested']) / data['num_stable'] * 100) print " * %d updates reached the stable karma threshold (%0.2f%%)" % ( data['num_stablekarma'], float(data['num_stablekarma']) / data['num_stable'] * 100) print " * %d updates reached the minimum time in testing threshold (%0.2f%%)" % ( data['num_testingtime'], float(data['num_testingtime']) / data['num_stable'] * 100) print " * %d went from testing to stable *without* karma (%0.2f%%)" % ( data['num_tested_without_karma'], float(data['num_tested_without_karma']) / data['num_tested'] * 100) print " * %d updates were pushed to stable with negative karma (%0.2f%%)" % ( data['stable_with_negative_karma'], float(data['stable_with_negative_karma']) / data['num_stable'] * 100) print " * %d critical path updates pushed to stable *without* karma" % ( len(data['critpath_without_karma'])) #for update in data['critpath_without_karma']: # print " * %s submitted by %s" % (update.title, update.submitter) print " * Time spent in testing:" print " * mean = %d days" % (data['accumulative'].days / len(data['deltas'])) print " * median = %d days" % (data['deltas'][len(data['deltas']) / 2].days) print " * mode = %d days" % (sorted(data['occurrences'].items(), key=itemgetter(1))[-1][0]) #for package in sorted(data['packages'].items(), key=itemgetter(1), reverse=True): # print " * %s: %d" % (package[0].name, package[1]) print print print "Out of %d total updates, %d received feedback (%0.2f%%)" % ( num_updates, feedback, (float(feedback) / 
num_updates * 100)) print "Out of %d total unique commenters, the top 50 are:" % (len(karma)) for submitter in sorted(karma.iteritems(), key=itemgetter(1), reverse=True)[:50]: print " * %s (%d)" % (submitter[0], submitter[1])
# Validate each submitted build for a buildroot override: it must be
# resolvable in koji and tagged with some release's candidate/testing tag
# (never the stable tag).  On any failure we flash an error and redirect
# back to the override form (empty dict for the JSON API).
for build in iterate(builds):
    release = None
    # NOTE(review): n, v, r are not used within this span -- presumably
    # consumed by code that follows; confirm before removing.
    n, v, r = get_nvr(build)

    # Make sure the build is tagged correctly
    try:
        tags = [tag["name"] for tag in koji.listTags(build)]
    except Exception, e:
        flash(str(e))
        if request_format() == "json":
            return dict()
        raise redirect("/override/new")

    # Determine the release by the tag, and sanity check the builds
    for tag in tags:
        for rel in Release.select():
            if tag in (rel.candidate_tag, rel.testing_tag):
                # NOTE(review): last_release appears to carry state to code
                # outside this span -- confirm against the full method.
                release = last_release = rel
            elif tag == rel.stable_tag:
                # A build already in stable cannot be overridden.
                flash("Error: %s is already tagged with %s" % (build, tag))
                if request_format() == "json":
                    return dict()
                raise redirect("/override/new")
    if not release:
        flash("Error: Could not determine release for %s with tags %s" %
              (build, map(str, tags)))
        if request_format() == "json":
            return dict()
        raise redirect("/override/new")

# Make sure the user has commit rights to the appropriate branch
def main(): load_config() __connection__ = hub = PackageHub("bodhi") koji = get_session() tasks = [] broke = set() # Clean up any stray pending tags for release in Release.select(): print "Finding all pending-testing builds..." if release.name.startswith('EL'): continue tag = release.pending_testing_tag tagged = [build['nvr'] for build in koji.listTagged(tag)] for nvr in tagged: try: build = PackageBuild.byNvr(nvr) for update in build.updates: if update.status in ('testing', 'stable', 'obsolete'): print "%s %s" % (nvr, update.status) if '--fix' in sys.argv: print "Untagging %s" % nvr koji.untagBuild(tag, nvr, force=True) except SQLObjectNotFound: print "Can't find build for %s" % nvr if '--fix' in sys.argv: print "Untagging %s" % nvr koji.untagBuild(tag, nvr, force=True) tag = release.pending_stable_tag tagged = [build['nvr'] for build in koji.listTagged(tag)] for nvr in tagged: try: build = PackageBuild.byNvr(nvr) for update in build.updates: if update.status in ('pending', 'obsolete'): print "%s %s" % (nvr, update.status) if '--fix' in sys.argv: print "Untagging %s" % nvr koji.untagBuild(tag, nvr, force=True) except SQLObjectNotFound: print "Can't find build for %s" % nvr if '--fix' in sys.argv: print "Untagging %s" % nvr koji.untagBuild(tag, nvr, force=True) # Check for testing updates that aren't tagged properly for update in PackageUpdate.select(PackageUpdate.q.status=='testing'): dest_tag = update.release.testing_tag for build in update.builds: tags = [tag['name'] for tag in koji.listTags(build=build.nvr)] if dest_tag not in tags: print "%s marked as testing, but tagged with %s" % (build.nvr, tags) if '--fix' in sys.argv: broke.add((tags[0], dest_tag, build.nvr)) # Check all candidate updates to see if they are in a different bodhi state for release in Release.select(): tag = release.candidate_tag tagged = [build['nvr'] for build in koji.listTagged(tag, latest=True)] for nvr in tagged: try: build = PackageBuild.byNvr(nvr) for update in build.updates: if 
update.status in ('testing', 'stable'): print "%s %s but tagged as %s" % (nvr, update.status, tag) if '--fix' in sys.argv: dest = release.testing_tag if update.status == 'stable': dest = release.stable_tag elif update.status == 'obsolete': dest = release.candidate_tag broke.add((tag, dest, nvr)) except SQLObjectNotFound: pass # Make sure that all builds in koji tagged as an update exist # in bodhi, and are in the expect state. for release in Release.select(): for tag in (release.testing_tag, release.stable_tag): tagged = [build['nvr'] for build in koji.listTagged(tag, latest=True)] for nvr in tagged: try: build = PackageBuild.byNvr(nvr) except SQLObjectNotFound: print "PackageUpdate(%s) not found!" % nvr continue if not len(build.updates): print "PackageBuild(%s) has no updates" % (build.nvr) status = 'testing' in tag and 'testing' or 'stable' for update in build.updates: if update.status != status: print "%s is %s in bodhi but tagged as %s in koji" % ( update.title, update.status, tag) if '--fix' in sys.argv: dest = release.testing_tag if update.status == 'stable': dest = release.stable_tag elif update.status == 'obsolete': dest = release.candidate_tag for b in update.builds: broke.add((tag, dest, b.nvr)) if broke: print " ** Fixing broken tags! **" koji.multicall = True for tag, dest, build in broke: print "Moving %s from %s to %s" % (build, tag, dest) koji.moveBuild(tag, dest, build, force=True) print "Running koji.multiCall()" results = koji.multiCall() success = False print "Waiting for tasks" bad_tasks = wait_for_tasks([task[0] for task in results]) if bad_tasks == 0: success = True if success: print "Tags successfully moved!" else: print "Error moving tags!" print "bad_tasks = %r" % bad_tasks