def testing_statistics(): """ Calculate and display various testing statistics """ from datetime import timedelta from bodhi.model import PackageUpdate deltas = [] occurrences = {} accumulative = timedelta() for update in PackageUpdate.select(): for comment in update.comments: if comment.text == 'This update has been pushed to testing': for othercomment in update.comments: if othercomment.text == 'This update has been pushed to stable': delta = othercomment.timestamp - comment.timestamp deltas.append(delta) occurrences[delta.days] = occurrences.setdefault( delta.days, 0) + 1 accumulative += deltas[-1] break break deltas.sort() all = PackageUpdate.select().count() percentage = int(float(len(deltas)) / float(all) * 100) mode = sorted(occurrences.items(), cmp=lambda x, y: cmp(x[1], y[1]))[-1][0] print "%d out of %d updates went through testing (%d%%)" % ( len(deltas), all, percentage) print "mean = %d days" % (accumulative.days / len(deltas)) print "median = %d days" % deltas[len(deltas) / 2].days print "mode = %d days" % mode
def testing_statistics(): """ Calculate and display various testing statistics """ from datetime import timedelta from bodhi.model import PackageUpdate deltas = [] occurrences = {} accumulative = timedelta() for update in PackageUpdate.select(): for comment in update.comments: if comment.text == 'This update has been pushed to testing': for othercomment in update.comments: if othercomment.text == 'This update has been pushed to stable': delta = othercomment.timestamp - comment.timestamp deltas.append(delta) occurrences[delta.days] = occurrences.setdefault(delta.days, 0) + 1 accumulative += deltas[-1] break break deltas.sort() all = PackageUpdate.select().count() percentage = int(float(len(deltas)) / float(all) * 100) mode = sorted(occurrences.items(), cmp=lambda x, y: cmp(x[1], y[1]))[-1][0] print "%d out of %d updates went through testing (%d%%)" % (len(deltas), all, percentage) print "mean = %d days" % (accumulative.days / len(deltas)) print "median = %d days" % deltas[len(deltas) / 2].days print "mode = %d days" % mode
def nagmail():
    """ Nag the submitters of updates based on a list of queries.

    Each query is a (name, selectResults, age-function) triple.  Updates
    older than 14 days (per the age function) are nagged about, at most
    once per week, with the nag timestamps recorded in update.nagged.
    """
    log.info("Starting nagmail job!")
    # (nag name, updates to consider, function giving the update's age in days)
    queries = [
            ('old_testing', PackageUpdate.select(
                AND(PackageUpdate.q.status == 'testing',
                    PackageUpdate.q.request == None)),
             lambda update: update.days_in_testing),
            ('old_pending', PackageUpdate.select(
                AND(PackageUpdate.q.status == 'pending',
                    PackageUpdate.q.request == None)),
             lambda update: get_age_in_days(update.date_submitted)),
    ]
    # `oldname` temporarily holds the query name while it is swapped out
    # for 'old_testing_critpath' on critpath updates.
    oldname = None
    mail_admin = False
    #mail_proventesters = False
    for name, query, date in queries:
        for update in query:
            if date(update) > 14:
                if update.nagged:
                    # Respect the per-nag-name cooldown stored on the update
                    if update.nagged.has_key(name) and update.nagged[name]:
                        if (datetime.utcnow() - update.nagged[name]).days < 7:
                            continue # Only nag once a week at most
                    nagged = update.nagged
                else:
                    nagged = {}
                if update.critpath:
                    if update.critpath_approved:
                        continue
                    else:
                        # Use a critpath-specific nag template and also
                        # notify the admins.
                        oldname = name
                        name = 'old_testing_critpath'
                        mail_admin = True
                        #mail_proventesters = True
                log.info("[%s] Nagging %s about %s" % (name, update.submitter,
                                                       update.title))
                mail.send(update.submitter, name, update)
                if mail_admin:
                    mail.send_admin(name, update)
                    mail_admin = False
                #if mail_proventesters:
                #    mail.send(config.get('proventesters_email'), name, update)
                #    mail_proventesters = False
                # Record when we nagged, so the 7-day cooldown above works
                nagged[name] = datetime.utcnow()
                update.nagged = nagged
                # Restore the original query name after a critpath swap
                if oldname:
                    name = oldname
                    oldname = None
    log.info("nagmail complete!")
def _lock(self):
    """ Write out what updates we are pushing and any successfully mashed
    repositories to our MASHING lock """
    mashed_dir = config.get("mashed_dir")
    mash_stage = config.get("mashed_stage_dir")
    # Lock file name embeds the mash id so concurrent mashes don't collide
    mash_lock = join(mashed_dir, "MASHING-%s" % self.mash_lock_id)
    if not os.path.isdir(mashed_dir):
        log.info("Creating mashed_dir %s" % mashed_dir)
        os.makedirs(mashed_dir)
    if not os.path.isdir(mash_stage):
        log.info("Creating mashed_stage_dir %s" % mash_stage)
        os.makedirs(mash_stage)
    if os.path.exists(mash_lock):
        # A lock already exists: either we are resuming that push, or a
        # previous push was interrupted and must be dealt with first.
        if self.resume:
            log.debug("Resuming previous push!")
            lock = file(mash_lock, "r")
            masher_state = pickle.load(lock)
            lock.close()

            # For backwards compatability, we need to make sure we handle
            # masher state that is just a list of updates, as well as a
            # dictionary of updates and successfully mashed repos
            if isinstance(masher_state, list):
                for up in masher_state:
                    try:
                        up = PackageUpdate.byTitle(up)
                        self.updates.add(up)
                    except SQLObjectNotFound:
                        log.warning("Cannot find %s" % up)

            # { 'updates' : [PackageUpdate.title,],
            #   'repos'   : ['/path_to_completed_repo',] }
            elif isinstance(masher_state, dict):
                for up in masher_state["updates"]:
                    try:
                        up = PackageUpdate.byTitle(up)
                        self.updates.add(up)
                    except SQLObjectNotFound:
                        log.warning("Cannot find %s" % up)
                for repo in masher_state["composed_repos"]:
                    self.composed_repos.append(repo)
            else:
                log.error("Unknown masher lock format: %s" % masher_state)
                raise MashTaskException
        else:
            log.error("Previous mash not complete! Either resume the last "
                      "push, or remove %s" % mash_lock)
            raise MashTaskException
    else:
        # No existing lock: a resume makes no sense, so fail loudly
        if self.resume:
            msg = "Trying to resume a push, yet %s doesn't exist!" % mash_lock
            log.error(msg)
            raise MashTaskException(msg)

        log.debug("Creating lock for updates push: %s" % mash_lock)
        lock = file(mash_lock, "w")
        pickle.dump(
            {"updates": [update.title for update in self.updates],
             "composed_repos": self.composed_repos},
            lock)
        lock.close()
def get_update(self, name='TurboGears-1.0.2.2-2.fc7'):
    """ Create and return a testing security update for *name*,
    with its build attached. """
    new_update = PackageUpdate(title=name,
                               release=get_rel(),
                               submitter='*****@*****.**',
                               status='testing',
                               notes='foobar',
                               type='security')
    new_update.addPackageBuild(get_build(name))
    return new_update
def test_delete(self):
    """ Deleting an update should remove it from the database. """
    bodhi = self.__get_bodhi_client()
    opts = self.__get_opts()
    self.__save_update(self.build, opts, bodhi)
    assert PackageUpdate.byTitle(self.build)
    # Delete the update; the return value is not interesting here
    # (previously bound to an unused local).
    bodhi.delete(update=self.build)
    try:
        PackageUpdate.byTitle(self.build)
        assert False, "Update not deleted properly"
    except SQLObjectNotFound:
        # Expected: the update is gone
        pass
def test_request(self):
    """ Requesting a push should set the update's request to 'testing'. """
    bodhi = self.__get_bodhi_client()
    opts = self.__get_opts()
    self.__save_update(self.build, opts, bodhi)
    assert PackageUpdate.byTitle(self.build)

    def current_request():
        # Re-fetch the update and report its request field
        return PackageUpdate.byTitle(self.build).request

    bodhi.request(update=self.build, request=opts.request)
    assert current_request() == 'testing'
    # An explicit 'testing' request should behave identically
    opts.request = 'testing'
    bodhi.request(update=self.build, request=opts.request)
    assert current_request() == 'testing'
def test_encoding(self, buildnvr='yum-3.2.1-1.fc7'):
    """ Unicode submitter names and notes should round-trip intact. """
    submitter = u'Foo \xc3\xa9 Bar <*****@*****.**>'
    notes = u'Testing \u2019t stuff'
    update = PackageUpdate(title=buildnvr,
                           release=get_rel(),
                           submitter=submitter,
                           notes=notes,
                           type='security')
    assert update
    assert update.notes == notes
    assert update.submitter == submitter
    update.addPackageBuild(get_build(buildnvr))
    # Re-fetch by title and make sure the build links back to the update
    update = PackageUpdate.byTitle(buildnvr)
    assert update.builds[0].updates[0] == update
    return update
def test_comment(self):
    """ Comments should accumulate; a submitter cannot alter karma. """
    bodhi = self.__get_bodhi_client()
    opts = self.__get_opts()
    self.__save_update(self.build, opts, bodhi)
    assert PackageUpdate.byTitle(self.build)

    def fetch():
        return PackageUpdate.byTitle(self.build)

    bodhi.comment(update=self.build, comment=opts.comment, karma=opts.karma)
    update = fetch()
    assert len(update.comments) == 2, update.comments
    assert update.comments[1].text == opts.comment
    # Submitter cannot alter karma on their own update
    assert update.karma == 0
    #assert update.karma == int(opts.karma), update.karma

    bodhi.comment(update=self.build, comment=opts.comment, karma=1)
    update = fetch()
    assert len(update.comments) == 3, update.comments
def test_comment(self):
    """ Comments should accumulate and karma should be tallied. """
    bodhi = self.__get_bodhi_client()
    opts = self.__get_opts()
    self.__save_update(self.build, opts, bodhi)
    assert PackageUpdate.byTitle(self.build)

    def fetch():
        return PackageUpdate.byTitle(self.build)

    bodhi.comment(update=self.build, comment=opts.comment, karma=opts.karma)
    update = fetch()
    assert len(update.comments) == 2, update.comments
    assert update.comments[1].text == opts.comment
    assert update.karma == int(opts.karma)

    # A second comment with karma=1 bumps the tally by two relative to
    # the first (the original -? value flips to +1 semantics server-side).
    bodhi.comment(update=self.build, comment=opts.comment, karma=1)
    update = fetch()
    assert len(update.comments) == 3, update.comments
    assert update.karma == int(opts.karma) + 2
def approve_testing_updates():
    """ Scan all testing updates and approve ones that have met the
    per-release testing requirements.

    https://fedoraproject.org/wiki/Package_update_acceptance_criteria
    """
    log.info('Running approve_testing_updates job...')
    testing = PackageUpdate.select(
        AND(PackageUpdate.q.status == 'testing',
            PackageUpdate.q.request == None))
    for update in testing:
        # Skip: releases without testing requirements, updates that have
        # already met them, and critpath updates (which follow their own
        # approval criteria beyond time in testing).
        if (not update.release.mandatory_days_in_testing or
                update.met_testing_requirements or
                update.critpath):
            continue
        if update.meets_testing_requirements:
            log.info('%s now meets testing requirements' % update.title)
            update.comment(
                config.get('testing_approval_msg') % update.days_in_testing,
                author='bodhi')
    log.info('approve_testing_updates job complete.')
def save_db():
    """ Pickle every release, update, and buildroot override to a timestamped
    file (bodhi-pickledb-<yymmdd.HHMM>) in the current directory. """
    ## Save each release and it's metrics
    releases = []
    for release in Release.select():
        rel = {}
        for attr in ('name', 'long_name', 'id_prefix', 'dist_tag',
                     'locked', 'metrics'):
            rel[attr] = getattr(release, attr)
        releases.append(rel)

    ## Save each update as a plain dict of primitives
    updates = []
    all_updates = PackageUpdate.select()
    progress = ProgressBar(maxValue=all_updates.count())

    for update in all_updates:
        data = {}
        data['title'] = update.title
        data['builds'] = [(build.package.name, build.nvr) for build in
                          update.builds]
        data['date_submitted'] = update.date_submitted
        data['date_pushed'] = update.date_pushed
        data['date_modified'] = update.date_modified
        data['release'] = [update.release.name, update.release.long_name,
                           update.release.id_prefix, update.release.dist_tag]
        data['submitter'] = update.submitter
        # Older schemas used `update_id`; fall back to `updateid`
        data['update_id'] = hasattr(update, 'update_id') and update.update_id \
                or update.updateid
        data['type'] = update.type
        data['karma'] = update.karma
        data['cves'] = [cve.cve_id for cve in update.cves]
        data['bugs'] = []
        for bug in update.bugs:
            data['bugs'].append([bug.bz_id, bug.title, bug.security])
            # `parent` only exists on newer Bugzilla schemas
            if hasattr(bug, 'parent'):
                data['bugs'][-1].append(bug.parent)
            else:
                data['bugs'][-1].append(False)
        data['status'] = update.status
        data['pushed'] = update.pushed
        data['notes'] = update.notes
        data['request'] = update.request
        data['comments'] = [(c.timestamp, c.author, c.text, c.karma,
                             c.anonymous) for c in update.comments]
        # `approved` only exists on newer schemas
        if hasattr(update, 'approved'):
            data['approved'] = update.approved
        else:
            data['approved'] = None
        updates.append(data)
        progress()

    # Save all buildroot overrides
    overrides = []
    for override in BuildRootOverride.select():
        try:
            overrides.append(override.__json__())
        except:
            # Best-effort: drop overrides that can no longer serialize
            print("Removing stray override: %s" % override)
            override.destroySelf()

    dump = file('bodhi-pickledb-%s' % time.strftime("%y%m%d.%H%M"), 'w')
    pickle.dump({'updates': updates, 'releases': releases,
                 'overrides': overrides}, dump)
    dump.close()
def main(): load_config() print "Calculating F11 0day update metrics..." updates = { 'bugfix': [], 'security': [], 'enhancement': [], 'newpackage': [] } date = datetime(*time.strptime('06-09-2009', '%m-%d-%Y')[:-2]) f11 = Release.byName('F11') for update in PackageUpdate.select(PackageUpdate.q.releaseID == f11.id): for comment in update.comments: if comment.author == 'bodhi' and comment.timestamp < date and \ comment.text.startswith('This update has been pushed to stable'): updates[update.type].append(update.title) break pprint(updates) print '=' * 80 print 'F11 0day stats' print ' * %d security' % len(updates['security']) print ' * %d bugfixes' % len(updates['bugfix']) print ' * %d enhancements' % len(updates['enhancement']) print ' * %d newpackage' % len(updates['newpackage'])
def test_file_input(self):
    """ Updates described in an input file should be parsed and saved. """
    bodhi = self.__get_bodhi_client()
    opts = self.__get_opts()

    # Write an update definition for our build to the input file
    out = file(opts.input_file, 'w')
    out.write('''[%s]
type=enhancement
request=testing
bugs=123,456
notes=bar
autokarma=True
stable_karma=10
unstable_karma=-10
close_bugs=True
''' % self.build)
    out.close()

    # Parse the file and save each resulting update
    for update_args in bodhi.parse_file(input_file=opts.input_file):
        bodhi.save(**update_args)

    update = PackageUpdate.byTitle(self.build)
    assert update.type == 'enhancement'
    assert update.request == 'testing'
    assert update.notes == 'bar', repr(update.notes)
    for bug_id in (123, 456):
        assert Bugzilla.byBz_id(bug_id) in update.bugs
    os.unlink(opts.input_file)
def main():
    """ Destroy a release and everything attached to it.

    Usage: script <release-name>.  Deletes every update for the named
    release along with each update's comments, builds, and exclusively
    owned bugs, then destroys the release itself.
    """
    load_config()
    __connection__ = hub = PackageHub("bodhi")
    if len(sys.argv) != 2:
        print "Usage: %s <release>" % sys.argv[0]
        sys.exit(1)
    try:
        release = Release.byName(sys.argv[1].upper())
    except SQLObjectNotFound:
        print "Cannot find Release '%s'" % sys.argv[1]
        sys.exit(1)
    updates = PackageUpdate.select(PackageUpdate.q.releaseID == release.id)
    progress = ProgressBar(maxValue=updates.count())
    print "Destroying all updates, comments, and bugs associated with %s" % release.name
    for update in updates:
        # Children first, then the update itself
        for comment in update.comments:
            comment.destroySelf()
        for build in update.builds:
            build.destroySelf()
        for bug in update.bugs:
            # Only remove bugs not shared with updates in other releases
            if len(bug.updates) == 1:
                bug.destroySelf()
        update.destroySelf()
        progress()
    release.destroySelf()
    hub.commit()
    print
def approve_testing_updates():
    """ Scan all testing updates and approve ones that have met the
    per-release testing requirements.

    https://fedoraproject.org/wiki/Package_update_acceptance_criteria
    """
    log.info('Running approve_testing_updates job...')
    candidates = PackageUpdate.select(
        AND(PackageUpdate.q.status == 'testing',
            PackageUpdate.q.request == None))
    for update in candidates:
        # Releases with no testing requirement never auto-approve
        if not update.release.mandatory_days_in_testing:
            continue
        # Already met the requirements once; nothing more to do
        if update.met_testing_requirements:
            continue
        # Critpath updates follow their own approval process
        if update.critpath:
            continue
        if update.meets_testing_requirements:
            log.info('%s now meets testing requirements' % update.title)
            message = config.get('testing_approval_msg') % update.days_in_testing
            update.comment(message, author='bodhi')
    log.info('approve_testing_updates job complete.')
def default(self, search, *args, **kw):
    """ Free-text search across update titles, bug numbers, and CVE ids.

    Redirects straight to the update when there is exactly one hit;
    otherwise renders the list of matches.
    """
    results = set()
    search = search.strip()

    # Search name-version-release
    for update in PackageUpdate.select(
            LIKE(PackageUpdate.q.title, '%%%s%%' % search),
            orderBy=PackageUpdate.q.date_submitted):
        results.add(update)

    # Search bug numbers (only meaningful if the term is an integer)
    try:
        for bug in Bugzilla.select(Bugzilla.q.bz_id == int(search)):
            for update in bug.updates:
                results.add(update)
    except ValueError:  # can't convert search search to integer
        pass

    # Search CVEs
    if search.startswith('CVE') or search.startswith('CAN'):
        # Search bug titles for CVE, since that is how we track them now
        for bug in Bugzilla.select(LIKE(Bugzilla.q.title,
                                        '%%%s%%' % search)):
            for update in bug.updates:
                results.add(update)
        # We still have some CVE objects lying around, so search them too
        for cve in CVE.select(CVE.q.cve_id == search):
            for update in cve.updates:
                results.add(update)

    # If there is only 1 result, then jump right to it
    num_items = len(results)
    if num_items == 1:
        raise redirect(results.pop().get_url())

    return dict(updates=list(results), num_items=num_items,
                title="%d Results Found" % num_items)
def refresh(self):
    """ Refresh all of the metrics for all releases.

    For each release, initialize our metrics objects, and feed them
    every update for that release.  Do the necessary calculations,
    and then save our metrics to the database in the Release.metrics
    PickleCol.
    """
    log.info("Doing a hard refresh of our metrics data")
    metrics = {}
    all_updates = list(PackageUpdate.select())
    releases = list(Release.select())

    # Bucket every update by release name so the per-release pass below
    # only walks its own updates.
    updates_by_release = {}
    for release in releases:
        updates_by_release[release.name] = []
    for update in all_updates:
        updates_by_release[update.release.name].append(update)

    for release in releases:
        log.debug("Calculating metrics for %s" % release.name)
        self.init_metrics(release)
        for update in updates_by_release[release.name]:
            for metric in self.metrics:
                metric.update(update)
        for metric in self.metrics:
            metric.done()
            metrics[metric.__class__.__name__] = metric.get_data()
        release.metrics = metrics
    hub.commit()

    # Free the big lists before returning
    del all_updates
    del releases
    log.info("Metrics generation complete!")
def test_multibuild(self):
    """ An update spanning several builds should track all of them. """
    builds = ['yum-3.2.1-1.fc7', 'httpd-2.2.4-4.1.fc7']
    release = get_rel()
    update = PackageUpdate(title=','.join(builds), release=release,
                           submitter='*****@*****.**', notes='Testing!',
                           type='bugfix')
    # Create a Package + PackageBuild pair for each nvr, then attach them
    package_builds = []
    for build_nvr in builds:
        name = get_nvr(build_nvr)[0]
        package_builds.append(
            PackageBuild(nvr=build_nvr, package=Package(name=name)))
    for pkg_build in package_builds:
        update.addPackageBuild(pkg_build)

    assert update.builds[0].nvr == builds[0]
    assert update.builds[1].nvr == builds[1]
    assert update.title == ','.join(builds)
    assert update.release.name == 'fc7'
    assert release.updates[0] == update
    assert update.status == 'pending'
    assert update.type == 'bugfix'
    assert update.notes == 'Testing!'
    for pkg_build in package_builds:
        assert pkg_build.updates[0] == update
def get_security_updates(self, release):
    """ Return pending security updates in testing for the release with
    the given long name. """
    rel = Release.select(Release.q.long_name == release)[0]
    criteria = AND(PackageUpdate.q.releaseID == rel.id,
                   PackageUpdate.q.type == 'security',
                   PackageUpdate.q.status == 'testing',
                   PackageUpdate.q.request == None)
    return PackageUpdate.select(criteria)
def reset_date_pushed(status='testing'): """ Reset the date_pushed on all testing updates with the most recent bodhi comment that relates to it's current status. This needed to happen when a few batches of updates were pushed without a date_pushed field, so we had to recreate it based on bodhi's comments. """ from bodhi.model import PackageUpdate from sqlobject import AND for update in PackageUpdate.select( AND(PackageUpdate.q.date_pushed == None, PackageUpdate.q.status == status)): date = None for comment in update.comments: if comment.author == 'bodhi': if comment.text == 'This update has been pushed to %s' % update.status: if date and comment.timestamp < date: print "Skipping older push %s for %s" % ( comment.timestamp, update.title) else: date = comment.timestamp print "Setting %s to %s" % (update.title, comment.timestamp) update.date_pushed = date
def test_unpush(self):
    """ Requesting an unpush should return the update to pending. """
    bodhi = self.__get_bodhi_client()
    opts = self.__get_opts()
    self.__save_update(self.build, opts, bodhi)
    opts.request = 'unpush'
    bodhi.request(update=self.build, request=opts.request)
    refreshed = PackageUpdate.byTitle(self.build)
    assert refreshed.status == 'pending'
def test_mine(self):
    """ Querying with mine=True should return only the caller's update. """
    bodhi = self.__get_bodhi_client()
    opts = self.__get_opts()
    self.__save_update(self.build, opts, bodhi)
    assert PackageUpdate.byTitle(self.build)
    result = bodhi.query(mine=True)
    assert result['title'] == u"1 update found", repr(result)
    assert len(result['updates']) == 1
def get_security_updates(self, release):
    """ Return security updates in testing for the release with the given
    long name, ordered by time spent in testing. """
    rel = Release.select(Release.q.long_name == release)[0]
    pending_security = PackageUpdate.select(
        AND(PackageUpdate.q.releaseID == rel.id,
            PackageUpdate.q.type == 'security',
            PackageUpdate.q.status == 'testing',
            PackageUpdate.q.request == None))
    return self.sort_by_days_in_testing(pending_security)
def save_db():
    """ Pickle every release and update to a timestamped file
    (bodhi-pickledb-<yymmdd.HHMM>) in the current directory. """
    ## Save each release and it's metrics
    releases = []
    for release in Release.select():
        rel = {}
        for attr in ('name', 'long_name', 'id_prefix', 'dist_tag',
                     'locked', 'metrics'):
            rel[attr] = getattr(release, attr)
        releases.append(rel)

    ## Save each update as a plain dict of primitives
    updates = []
    all_updates = PackageUpdate.select()
    progress = ProgressBar(maxValue=all_updates.count())

    for update in all_updates:
        data = {}
        data['title'] = update.title
        data['builds'] = [(build.package.name, build.nvr) for build in
                          update.builds]
        data['date_submitted'] = update.date_submitted
        data['date_pushed'] = update.date_pushed
        data['date_modified'] = update.date_modified
        data['release'] = [
            update.release.name, update.release.long_name,
            update.release.id_prefix, update.release.dist_tag
        ]
        data['submitter'] = update.submitter
        # Older schemas used `update_id`; fall back to `updateid`
        data['update_id'] = hasattr(
            update, 'update_id') and update.update_id or update.updateid
        data['type'] = update.type
        data['karma'] = update.karma
        data['cves'] = [cve.cve_id for cve in update.cves]
        data['bugs'] = []
        for bug in update.bugs:
            data['bugs'].append([bug.bz_id, bug.title, bug.security])
            # `parent` only exists on newer Bugzilla schemas
            if hasattr(bug, 'parent'):
                data['bugs'][-1].append(bug.parent)
            else:
                data['bugs'][-1].append(False)
        data['status'] = update.status
        data['pushed'] = update.pushed
        data['notes'] = update.notes
        data['request'] = update.request
        data['comments'] = [(c.timestamp, c.author, c.text, c.karma,
                             c.anonymous) for c in update.comments]
        # `approved` only exists on newer schemas
        if hasattr(update, 'approved'):
            data['approved'] = update.approved
        else:
            data['approved'] = None
        updates.append(data)
        progress()

    dump = file('bodhi-pickledb-%s' % time.strftime("%y%m%d.%H%M"), 'w')
    pickle.dump({'updates': updates, 'releases': releases}, dump)
    dump.close()
def save_db():
    """ Pickle every release and update to a timestamped file
    (bodhi-pickledb-<yymmdd.HHMM>) in the current directory. """
    ## Save each release and it's metrics
    releases = []
    for release in Release.select():
        rel = {}
        for attr in ("name", "long_name", "id_prefix", "dist_tag",
                     "locked", "metrics"):
            rel[attr] = getattr(release, attr)
        releases.append(rel)

    ## Save each update as a plain dict of primitives
    updates = []
    all_updates = PackageUpdate.select()
    progress = ProgressBar(maxValue=all_updates.count())

    for update in all_updates:
        data = {}
        data["title"] = update.title
        data["builds"] = [(build.package.name, build.nvr) for build in
                          update.builds]
        data["date_submitted"] = update.date_submitted
        data["date_pushed"] = update.date_pushed
        data["date_modified"] = update.date_modified
        data["release"] = [
            update.release.name,
            update.release.long_name,
            update.release.id_prefix,
            update.release.dist_tag,
        ]
        data["submitter"] = update.submitter
        # Older schemas used `update_id`; fall back to `updateid`
        data["update_id"] = hasattr(update, "update_id") and \
            update.update_id or update.updateid
        data["type"] = update.type
        data["karma"] = update.karma
        data["cves"] = [cve.cve_id for cve in update.cves]
        data["bugs"] = []
        for bug in update.bugs:
            data["bugs"].append([bug.bz_id, bug.title, bug.security])
            # `parent` only exists on newer Bugzilla schemas
            if hasattr(bug, "parent"):
                data["bugs"][-1].append(bug.parent)
            else:
                data["bugs"][-1].append(False)
        data["status"] = update.status
        data["pushed"] = update.pushed
        data["notes"] = update.notes
        data["request"] = update.request
        data["comments"] = [(c.timestamp, c.author, c.text, c.karma,
                             c.anonymous) for c in update.comments]
        # `approved` only exists on newer schemas
        if hasattr(update, "approved"):
            data["approved"] = update.approved
        else:
            data["approved"] = None
        updates.append(data)
        progress()

    dump = file("bodhi-pickledb-%s" % time.strftime("%y%m%d.%H%M"), "w")
    pickle.dump({"updates": updates, "releases": releases}, dump)
    dump.close()
def get_unapproved_critpath_updates(self, release):
    """ Return unapproved critpath updates in testing for the release with
    the given long name, sorted by days in testing. """
    rel = Release.select(Release.q.long_name == release)[0]
    candidates = PackageUpdate.select(
        AND(PackageUpdate.q.releaseID == rel.id,
            PackageUpdate.q.status == 'testing',
            PackageUpdate.q.request == None),
        orderBy=PackageUpdate.q.date_submitted).reversed()
    unapproved = [up for up in candidates
                  if up.critpath and not up.critpath_approved]
    return self.sort_by_days_in_testing(unapproved)
def get_unapproved_critpath_updates(self, release):
    """ Return critpath updates for the given release (by long name) that
    are neither stable nor obsolete and have not yet been approved,
    newest submissions first. """
    rel = Release.select(Release.q.long_name == release)[0]
    candidates = PackageUpdate.select(
        AND(PackageUpdate.q.releaseID == rel.id,
            PackageUpdate.q.status != 'stable',
            PackageUpdate.q.status != 'obsolete',
            PackageUpdate.q.request == None),
        orderBy=PackageUpdate.q.date_submitted).reversed()
    return [up for up in candidates
            if up.critpath and not up.critpath_approved]
def test_new_update(self):
    """ Saving a new update should persist all of its fields. """
    bodhi = self.__get_bodhi_client()
    opts = self.__get_opts()
    self.__save_update(self.build, opts, bodhi)
    update = PackageUpdate.byTitle(self.build)
    assert update and update.title == self.build
    assert update.release.name == opts.release.upper()
    assert update.type == opts.type_
    assert update.notes == opts.notes
    # Every bug listed in the options must be attached to the update
    for bug_id in opts.bugs.split(','):
        assert Bugzilla.byBz_id(int(bug_id)) in update.bugs
def clean_tables(): from bodhi.model import (Release, Package, PackageBuild, PackageUpdate, Comment, CVE, Bugzilla, BuildRootOverride) from bodhi.identity.tables import (Visit, VisitIdentity, Group, User, Permission) print "Cleaning out tables" Release.dropTable(ifExists=True, cascade=True) Package.dropTable(ifExists=True, cascade=True) PackageBuild.dropTable(ifExists=True, cascade=True) PackageUpdate.dropTable(ifExists=True, cascade=True) Comment.dropTable(ifExists=True, cascade=True) CVE.dropTable(ifExists=True, cascade=True) Bugzilla.dropTable(ifExists=True, cascade=True) BuildRootOverride.dropTable(ifExists=True, cascade=True) Visit.dropTable(ifExists=True, cascade=True) VisitIdentity.dropTable(ifExists=True, cascade=True) Group.dropTable(ifExists=True, cascade=True) User.dropTable(ifExists=True, cascade=True) Permission.dropTable(ifExists=True, cascade=True) hub.commit() Release.createTable(ifNotExists=True) Package.createTable(ifNotExists=True) PackageBuild.createTable(ifNotExists=True) PackageUpdate.createTable(ifNotExists=True) Comment.createTable(ifNotExists=True) CVE.createTable(ifNotExists=True) Bugzilla.createTable(ifNotExists=True) BuildRootOverride.createTable(ifNotExists=True) Visit.createTable(ifNotExists=True) VisitIdentity.createTable(ifNotExists=True) Group.createTable(ifNotExists=True) User.createTable(ifNotExists=True) Permission.createTable(ifNotExists=True)
def push(self):
    """ List updates tagged with a push/unpush/move request """
    mash = self._current_mash()
    if not mash:
        flash_log("A masher exception has occured.")
        return dict(updates=[], resume=False)

    pending = []
    resume = False
    if mash['mashing']:
        flash_log('The masher is currently pushing updates')
        return dict(updates=pending, resume=resume)

    # Any updates left over in the masher state belong to an interrupted
    # push that can be resumed.
    for title in mash.get('updates', []):
        try:
            pending.append(PackageUpdate.byTitle(title))
        except SQLObjectNotFound:
            log.warning("Cannot find update %s in push queue" % title)
    if pending:
        flash_log('There is an updates push ready to be resumed')
        resume = True
    else:
        # Get a list of all updates with a request that aren't
        # unapproved security updates, or for a locked release
        requests = PackageUpdate.select(PackageUpdate.q.request != None)
        # Come F13+, bodhi will not have locked releases. It will
        # implement the 'No Frozen Rawhide' proposal, and treat 'locked'
        # releases as pending.
        #requests = filter(lambda update: not update.release.locked,
        #                  PackageUpdate.select(
        #                      PackageUpdate.q.request != None))
        for update in requests:
            # Disable security approval requirement
            #if update.type == 'security' and not update.approved:
            #    continue
            pending.append(update)
    return dict(updates=pending, resume=resume)
def push(self):
    """ List updates tagged with a push/unpush/move request """
    queued = []
    resume = False
    mash = self._current_mash()
    if not mash:
        flash_log("A masher exception has occured.")
        return dict(updates=[], resume=False)

    if mash['mashing']:
        flash_log('The masher is currently pushing updates')
    else:
        # First, look for an interrupted push recorded in the mash state
        for title in mash.get('updates', []):
            try:
                queued.append(PackageUpdate.byTitle(title))
            except SQLObjectNotFound:
                log.warning("Cannot find update %s in push queue" % title)
        if queued:
            flash_log('There is an updates push ready to be resumed')
            resume = True
        else:
            # Get a list of all updates with a request that aren't
            # unapproved security updates, or for a locked release
            requests = PackageUpdate.select(PackageUpdate.q.request != None)
            # Come F13+, bodhi will not have locked releases. It will
            # implement the 'No Frozen Rawhide' proposal, and treat 'locked'
            # releases as pending.
            #requests = filter(lambda update: not update.release.locked,
            #                  PackageUpdate.select(
            #                      PackageUpdate.q.request != None))
            for update in requests:
                # Disable security approval requirement
                #if update.type == 'security' and not update.approved:
                #    continue
                queued.append(update)
    return dict(updates=queued, resume=resume)
def get_critpath_updates(self, release=None, unapproved=None):
    """ Build feed entries for the latest critical path updates.

    :param release: optional release short name to restrict the feed to
    :param unapproved: if truthy, exclude stable updates and updates that
        are already critpath-approved
    :returns: dict with feed title, subtitle, link, and entries
    """
    i = 0  # NOTE(review): incremented per entry but never read
    entries = []
    base = config.get('base_address')
    title = 'Latest Critical Path Updates'
    query = [PackageUpdate.q.status != 'obsolete']
    if release:
        try:
            release = Release.byName(release)
        except SQLObjectNotFound:
            return dict(title = '%s release not found' % release, entries=[])
        releases = [release]
        title = title + ' for %s' % release.long_name
    else:
        releases = Release.select()
    if unapproved:
        query.append(PackageUpdate.q.status != 'stable')
    # Walk updates newest-first across the selected releases
    for update in PackageUpdate.select(
            AND(OR(*[PackageUpdate.q.releaseID == release.id
                     for release in releases]),
                *query),
            orderBy=PackageUpdate.q.date_submitted).reversed():
        delta = datetime.utcnow() - update.date_submitted
        # NOTE(review): the feed only stops once an update is older than
        # feeds.num_days_to_show AND max_entries is reached — confirm this
        # cutoff interaction is intended.
        if delta and delta.days > config.get('feeds.num_days_to_show'):
            if len(entries) >= config.get('feeds.max_entries'):
                break
        if update.critpath:
            if unapproved:
                if update.critpath_approved:
                    continue
            entries.append({
                'id'        : base + url(update.get_url()),
                'summary'   : update.notes,
                'link'      : base + url(update.get_url()),
                'published' : update.date_submitted,
                'updated'   : update.date_submitted,
                'title'     : update.title,
            })
            i += 1
    return dict(
            title = title,
            subtitle = "",
            link = config.get('base_address') + url('/'),
            entries = entries
    )
def main():
    """ Print per-release statistics about updates automatically pushed or
    unpushed by karma, parsed out of a grep over bodhi's logs. """
    # Updates automatically unpushed after hitting the unstable threshold
    unstable = subprocess.Popen(
        'grep "\[Fedora Update\] \[unstable\]" bodhi.logs',
        stdout=subprocess.PIPE, shell=True)
    out, err = unstable.communicate()
    (unstable_updates, unstable_critpath, unstable_deltas, unstable_accum,
     unstable_occur) = parse_output(out)

    # Updates automatically pushed after hitting the stable karma threshold
    stable = subprocess.Popen(
        'grep "\[Fedora Update\] \[stablekarma\]" bodhi.logs',
        stdout=subprocess.PIPE, shell=True)
    out, err = stable.communicate()
    (stable_updates, stable_critpath, stable_deltas, stable_accum,
     stable_occur) = parse_output(out)

    for release in Release.select():
        print '\n' + header(release.long_name)
        num_updates = PackageUpdate.select(
            PackageUpdate.q.releaseID == release.id).count()
        num_stable = len(stable_updates[release.name])
        num_unstable = len(unstable_updates[release.name])
        # NOTE(review): num_testing is computed but never printed below
        num_testing = len(unstable_deltas) + len(stable_deltas)
        print " * %d updates automatically unpushed due to karma (%0.2f%%)" % (
            num_unstable, float(num_unstable) / num_updates * 100)
        print " * %d of which were critical path updates" % (
            unstable_critpath[release.name])
        print " * %d updates automatically pushed due to karma (%0.2f%%)" % (
            num_stable, float(num_stable) / num_updates * 100)
        print " * %d of which were critical path updates" % (
            stable_critpath[release.name])
        print " * Time spent in testing of updates that were pushed by karma:"
        print " * mean = %d days" % (stable_accum.days / len(stable_deltas))
        print " * median = %d days" % stable_deltas[len(stable_deltas) / 2].days
        print " * mode = %d days" % sorted(stable_occur.items(),
                                           key=itemgetter(1))[-1][0]
        print " * Time spent in testing of updates that were unpushed by karma:"
        print " * mean = %d days" % (unstable_accum.days / len(unstable_deltas))
        print " * median = %d days" % unstable_deltas[len(unstable_deltas) / 2].days
        print " * mode = %d days" % sorted(unstable_occur.items(),
                                           key=itemgetter(1))[-1][0]
def get_critpath_updates(self, release=None, unapproved=None):
    """ Build feed entries for the latest critical path updates.

    :param release: optional release short name to restrict the feed to
    :param unapproved: if truthy, exclude stable updates and updates that
        are already critpath-approved
    :returns: dict with feed title, subtitle, link, and entries
    """
    i = 0  # NOTE(review): incremented per entry but never read
    entries = []
    base = config.get('base_address')
    title = 'Latest Critical Path Updates'
    query = [PackageUpdate.q.status != 'obsolete']
    if release:
        try:
            release = Release.byName(release)
        except SQLObjectNotFound:
            return dict(title='%s release not found' % release, entries=[])
        releases = [release]
        title = title + ' for %s' % release.long_name
    else:
        releases = Release.select()
    if unapproved:
        query.append(PackageUpdate.q.status != 'stable')
    # Walk updates newest-first across the selected releases
    for update in PackageUpdate.select(
            AND(
                OR(*[
                    PackageUpdate.q.releaseID == release.id
                    for release in releases
                ]), *query),
            orderBy=PackageUpdate.q.date_submitted).reversed():
        delta = datetime.utcnow() - update.date_submitted
        # NOTE(review): the feed only stops once an update is older than
        # feeds.num_days_to_show AND max_entries is reached — confirm this
        # cutoff interaction is intended.
        if delta and delta.days > config.get('feeds.num_days_to_show'):
            if len(entries) >= config.get('feeds.max_entries'):
                break
        if update.critpath:
            if unapproved:
                if update.critpath_approved:
                    continue
            entries.append({
                'id': base + url(update.get_url()),
                'summary': update.notes,
                'link': base + url(update.get_url()),
                'published': update.date_submitted,
                'updated': update.date_submitted,
                'title': update.title,
            })
            i += 1
    return dict(title=title,
                subtitle="",
                link=config.get('base_address') + url('/'),
                entries=entries)
def clean_testing_builds(untag=False):
    """Clean up stale builds and superseded updates in the testing tags.

    For every release, compare each latest-tagged testing build against all
    other latest testing and stable builds (``compare_builds`` decides what
    to do about staleness), then look for testing updates whose builds are
    no longer among the latest in the testing tag and obsolete them in
    favor of the newer testing build.

    :param untag: when True, actually obsolete superseded updates;
        otherwise only print what would happen.
    """
    koji = get_session()
    for release in Release.select():
        # Latest build per package in the stable and testing tags.
        stable_builds = koji.listTagged(release.stable_tag, latest=True)
        stable_nvrs = [build["nvr"] for build in stable_builds]
        print "Fetched %d builds tagged with %s" % (len(stable_builds), release.stable_tag)
        testing_builds = koji.listTagged(release.testing_tag, latest=True)
        print "Fetched %d builds tagged with %s" % (len(testing_builds), release.testing_tag)
        testing_nvrs = [build["nvr"] for build in testing_builds]
        # Pairwise-compare every latest testing build against both the
        # testing and stable sets; compare_builds handles any untagging.
        for testing_build in testing_builds:
            for build in testing_builds:
                compare_builds(testing_build, build, untag, release.testing_tag)
            for build in stable_builds:
                compare_builds(testing_build, build, untag, release.testing_tag)
        # Find testing updates that aren't in the list of latest builds
        for update in PackageUpdate.select(
            AND(
                PackageUpdate.q.releaseID == release.id,
                PackageUpdate.q.status == "testing",
                PackageUpdate.q.request == None,
            )
        ):
            for build in update.builds:
                if build.nvr not in testing_nvrs:
                    # Locate the latest testing/stable NVRs for this package.
                    # NOTE(review): the "name-" prefix match can also hit a
                    # different package whose name extends this one — TODO
                    # confirm that is acceptable here.
                    latest_testing = None
                    latest_stable = None
                    for testing in testing_nvrs:
                        if testing.startswith(build.package.name + "-"):
                            latest_testing = testing
                            break
                    for stable in stable_nvrs:
                        if stable.startswith(build.package.name + "-"):
                            latest_stable = stable
                            break
                    if latest_testing:
                        koji_build = koji.getBuild(build.nvr)
                        latest_build = koji.getBuild(latest_testing)
                        # Only act when our build is strictly older (by RPM
                        # epoch-version-release comparison) than the latest.
                        if rpm.labelCompare(build_evr(koji_build), build_evr(latest_build)) < 0:
                            print "%s in testing, latest_testing = %s, latest_stable = %s" % (
                                update.title,
                                latest_testing,
                                latest_stable,
                            )
                            if untag:
                                print "Obsoleting %s" % update.title
                                update.obsolete(newer=latest_testing)
def clean_testing_builds(untag=False): koji = get_session() for release in Release.select(): stable_builds = koji.listTagged(release.stable_tag, latest=True) stable_nvrs = [build['nvr'] for build in stable_builds] print "Fetched %d builds tagged with %s" % (len(stable_builds), release.stable_tag) testing_builds = koji.listTagged(release.testing_tag, latest=True) print "Fetched %d builds tagged with %s" % (len(testing_builds), release.testing_tag) testing_nvrs = [build['nvr'] for build in testing_builds] for testing_build in testing_builds: for build in testing_builds: compare_builds(testing_build, build, untag, release.testing_tag) for build in stable_builds: compare_builds(testing_build, build, untag, release.testing_tag) # Find testing updates that aren't in the list of latest builds for update in PackageUpdate.select( AND(PackageUpdate.q.releaseID == release.id, PackageUpdate.q.status == 'testing', PackageUpdate.q.request == None)): for build in update.builds: if build.nvr not in testing_nvrs: latest_testing = None latest_stable = None for testing in testing_nvrs: if testing.startswith(build.package.name + '-'): latest_testing = testing break for stable in stable_nvrs: if stable.startswith(build.package.name + '-'): latest_stable = stable break if latest_testing: koji_build = koji.getBuild(build.nvr) latest_build = koji.getBuild(latest_testing) if rpm.labelCompare(build_evr(koji_build), build_evr(latest_build)) < 0: print "%s in testing, latest_testing = %s, latest_stable = %s" % ( update.title, latest_testing, latest_stable) if untag: print "Obsoleting %s" % update.title update.obsolete(newer=latest_testing)
def main(): load_config() print "Calculating F11 0day update metrics..." updates = {'bugfix': [], 'security': [], 'enhancement': [], 'newpackage': []} date = datetime(*time.strptime('06-09-2009', '%m-%d-%Y')[:-2]) f11 = Release.byName('F11') for update in PackageUpdate.select(PackageUpdate.q.releaseID==f11.id): for comment in update.comments: if comment.author == 'bodhi' and comment.timestamp < date and \ comment.text.startswith('This update has been pushed to stable'): updates[update.type].append(update.title) break pprint(updates) print '=' * 80 print 'F11 0day stats' print ' * %d security' % len(updates['security']) print ' * %d bugfixes' % len(updates['bugfix']) print ' * %d enhancements' % len(updates['enhancement']) print ' * %d newpackage' % len(updates['newpackage'])
def main(): unstable = subprocess.Popen('grep "\[Fedora Update\] \[unstable\]" bodhi.logs', stdout=subprocess.PIPE, shell=True) out, err = unstable.communicate() (unstable_updates, unstable_critpath, unstable_deltas, unstable_accum, unstable_occur) = parse_output(out) stable = subprocess.Popen('grep "\[Fedora Update\] \[stablekarma\]" bodhi.logs', stdout=subprocess.PIPE, shell=True) out, err = stable.communicate() (stable_updates, stable_critpath, stable_deltas, stable_accum, stable_occur) = parse_output(out) for release in Release.select(): print '\n' + header(release.long_name) num_updates = PackageUpdate.select( PackageUpdate.q.releaseID==release.id).count() num_stable = len(stable_updates[release.name]) num_unstable = len(unstable_updates[release.name]) num_testing = len(unstable_deltas) + len(stable_deltas) print " * %d updates automatically unpushed due to karma (%0.2f%%)" % ( num_unstable, float(num_unstable) / num_updates * 100) print " * %d of which were critical path updates" % ( unstable_critpath[release.name]) print " * %d updates automatically pushed due to karma (%0.2f%%)" % ( num_stable, float(num_stable) / num_updates * 100) print " * %d of which were critical path updates" % ( stable_critpath[release.name]) print " * Time spent in testing of updates that were pushed by karma:" print " * mean = %d days" % (stable_accum.days / len(stable_deltas)) print " * median = %d days" % stable_deltas[len(stable_deltas)/2].days print " * mode = %d days" % sorted(stable_occur.items(), key=itemgetter(1))[-1][0] print " * Time spent in testing of updates that were unpushed by karma:" print " * mean = %d days" % (unstable_accum.days / len(unstable_deltas)) print " * median = %d days" % unstable_deltas[len(unstable_deltas)/2].days print " * mode = %d days" % sorted(unstable_occur.items(), key=itemgetter(1))[-1][0]
def reset_date_pushed(status='testing'): """ Reset the date_pushed on all testing updates with the most recent bodhi comment that relates to it's current status. This needed to happen when a few batches of updates were pushed without a date_pushed field, so we had to recreate it based on bodhi's comments. """ from bodhi.model import PackageUpdate from sqlobject import AND for update in PackageUpdate.select(AND(PackageUpdate.q.date_pushed==None, PackageUpdate.q.status==status)): date = None for comment in update.comments: if comment.author == 'bodhi': if comment.text == 'This update has been pushed to %s' % update.status: if date and comment.timestamp < date: print "Skipping older push %s for %s" % (comment.timestamp, update.title) else: date = comment.timestamp print "Setting %s to %s" % (update.title, comment.timestamp) update.date_pushed = date
def mash(self, updates=None, resume=False, **kw):
    """ Mash a list of PackageUpdate objects.

    If this instance is deployed with a remote masher, then it simply
    proxies the request.  If we are the masher, then send these updates
    to our Mash instance.  This will then start a thread that takes care
    of handling all of the update requests, composing fresh repositories,
    generating and sending update notices, closing bugs, etc.

    :param updates: update title(s) to mash — a list, a single title, or
        a stringified python list arriving from a JSON request.
    :param resume: resume a previously-interrupted mash.
    """
    if not updates:
        updates = []
    # Normalize `updates` to a list of update titles.
    if not isinstance(updates, list):
        if isinstance(updates, basestring):
            log.debug("Doing json hack")
            try:
                # A stringified python list may arrive from a JSON
                # request; rewrite the quoting so json.loads accepts it.
                updates = json.loads(
                    updates.replace("u'", "\"").replace("'", "\""))
            except ValueError:
                # Not a JSON list after all — treat it as a single title.
                log.debug("Didn't work, assuming it's a single update...")
                updates = [updates]
        else:
            updates = [updates]

    # If we're not The Masher, then proxy this request to it
    if config.get('masher'):
        data = self._masher_request(
            '/admin/mash', updates=updates, resume=resume) or {}
        flash_log('Push request %s' %
                  (data.get('success') and 'succeeded' or 'failed'))
        raise redirect('/admin/masher')

    from bodhi.masher import masher
    masher.queue([PackageUpdate.byTitle(title) for title in updates],
                 resume=resume)
    if request_format() == 'json':
        return dict(success=True)
    flash("Updates queued for mashing")
    raise redirect('/admin/masher')
def mash(self, updates=None, resume=False, **kw):
    """ Mash a list of PackageUpdate objects.

    If this instance is deployed with a remote masher, then it simply
    proxies the request. If we are the masher, then send these updates to
    our Mash instance. This will then start a thread that takes care of
    handling all of the update requests, composing fresh repositories,
    generating and sending update notices, closing bugs, etc.

    :param updates: update title(s) to mash — a list, a single title, or
        a stringified python list arriving from a JSON request.
    :param resume: resume a previously-interrupted mash.
    """
    if not updates:
        updates = []
    # Normalize `updates` to a list of update titles.
    if not isinstance(updates, list):
        if isinstance(updates, basestring):
            log.debug("Doing json hack")
            try:
                # A stringified python list may arrive from a JSON request;
                # rewrite the quoting so json.loads can parse it.
                updates = json.loads(updates.replace("u'", "\"").replace("'", "\""))
            except:
                # Best-effort fallback: treat the string as a single title.
                log.debug("Didn't work, assuming it's a single update...")
                updates = [updates]
        else:
            updates = [updates]

    # If we're not The Masher, then proxy this request to it
    if config.get('masher'):
        data = self._masher_request('/admin/mash', updates=updates, resume=resume) or {}
        flash_log('Push request %s' % (data.get('success') and 'succeeded' or 'failed'))
        raise redirect('/admin/masher')

    # We are the masher: queue the updates on the local Mash instance.
    from bodhi.masher import masher
    masher.queue([PackageUpdate.byTitle(title) for title in updates], resume=resume)
    if request_format() == 'json':
        return dict(success=True)
    flash("Updates queued for mashing")
    raise redirect('/admin/masher')
def default(self, search, *args, **kw):
    """Search for updates by title, bug number, or CVE id.

    Matching updates are collected into a set (deduplicating updates
    that match in more than one way).  When exactly one result is found,
    redirect straight to that update's page.

    :param search: the search string (an NVR fragment, a bug number, or
        a CVE/CAN id).
    :returns: dict with the result list, count, and page title.
    """
    results = set()
    search = search.strip()

    # Search name-version-release.  Plain loops instead of map() with a
    # side-effecting callable: clearer, and map() would become a lazy
    # no-op under Python 3.
    for update in PackageUpdate.select(
            LIKE(PackageUpdate.q.title, '%%%s%%' % search),
            orderBy=PackageUpdate.q.date_submitted):
        results.add(update)

    # Search bug numbers
    try:
        for bug in Bugzilla.select(Bugzilla.q.bz_id == int(search)):
            results.update(bug.updates)
    except ValueError:  # the search term is not an integer
        pass

    # Search CVEs
    if search.startswith('CVE') or search.startswith('CAN'):
        # Search bug titles for CVE, since that is how we track them now
        for bug in Bugzilla.select(LIKE(Bugzilla.q.title, '%%%s%%' % search)):
            results.update(bug.updates)
        # We still have some CVE objects lying around, so search them too
        for cve in CVE.select(CVE.q.cve_id == search):
            results.update(cve.updates)

    # If there is only 1 result, then jump right to it
    num_items = len(results)
    if num_items == 1:
        raise redirect(results.pop().get_url())

    return dict(updates=list(results), num_items=num_items,
                title="%d Results Found" % num_items)
""" conffile = tempfile.mktemp() fd = open(conffile, 'w') fd.write(confheader) repo = repo_config % { 'testrepo': self.testrepo_dir, 'rel': release.repodir, 'arch': arch.name, 'testing': testing and '1' or '0', 'final': self.final and '1' or '0' } fd.write(repo) fd.close() return conffile # # Main method used for testing purposes # if __name__ == '__foo__': from turbogears.database import PackageHub hub = PackageHub("bodhi") __connection__ = hub log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) closure = TestRepoClosure(PackageUpdate.select()) closure.run()