def finish(self, success):
    """Record the end of this masher thread and announce it on the bus.

    Logs the outcome and publishes a ``mashtask.complete`` message
    carrying the success flag and this thread's repo id.
    """
    outcome = 'Thread(%s) finished. Success: %r' % (self.id, success)
    self.log.info(outcome)
    notifications.publish(
        topic="mashtask.complete",
        msg=dict(success=success, repo=self.id),
        force=True,
    )
def work(self, msg):
    """Begin the push process.

    Organizes and prioritizes the pending updates, then fires off a
    separate MasherThread for each repo tag being mashed.  If there are
    any security updates in the push, those repositories are executed
    before all others.
    """
    body = msg['body']['msg']
    resume = body.get('resume', False)
    notifications.publish(topic="mashtask.start", msg=dict(), force=True)

    with self.db_factory() as session:
        releases = self.organize_updates(session, body)
        batches = self.prioritize_updates(releases)

        # Important repos first, then normal
        for batch in batches:
            # Stable first, then testing
            for req in ('stable', 'testing'):
                workers = []
                for release, request, updates in batch:
                    if request != req:
                        continue
                    self.log.info('Starting thread for %s %s for %d updates',
                                  release, request, len(updates))
                    worker = MasherThread(release, request, updates, self.log,
                                          self.db_factory, self.mash_dir,
                                          resume)
                    workers.append(worker)
                    worker.start()
                # Wait for this whole wave to finish before the next one.
                for worker in workers:
                    worker.join()

    self.log.info('Push complete!')
def eject_from_mash(self, update, reason):
    """Remove an update from the running push and undo its pending tag.

    Unlocks the update, comments on it with *reason*, removes the
    matching pending koji tag, clears its request, drops it from this
    thread's bookkeeping and publishes an ``update.eject`` message.
    """
    update.locked = False
    text = '%s ejected from the push because %r' % (update.title, reason)
    log.warn(text)
    update.comment(self.db, text, author=u'bodhi')

    # Remove the pending tag as well
    tag_attr = {
        UpdateRequest.stable: 'pending_stable_tag',
        UpdateRequest.testing: 'pending_testing_tag',
    }.get(update.request)
    if tag_attr:
        update.remove_tag(getattr(update.release, tag_attr), koji=self.koji)
    update.request = None

    # Forget the update in both in-flight trackers.
    for tracker in (self.state['updates'], self.updates):
        if update in tracker:
            tracker.remove(update)

    notifications.publish(
        topic="update.eject",
        msg=dict(
            repo=self.id,
            update=update,
            reason=reason,
            request=self.request,
            release=self.release,
        ),
        force=True,
    )
def delete_stack(request):
    """Delete a stack"""
    stack = request.validated["stack"]
    # Announce first so the message still carries the live stack object.
    notifications.publish(
        topic="stack.delete",
        msg={"stack": stack, "agent": request.user.name},
    )
    request.db.delete(stack)
    log.info("Deleted stack: %s", stack.name)
    return {"status": u"success"}
def delete_stack(request):
    """Delete a stack"""
    stack = request.validated['stack']
    agent = request.user.name
    # Publish before the row is removed from the session.
    notifications.publish(topic='stack.delete',
                          msg=dict(stack=stack, agent=agent))
    request.db.delete(stack)
    log.info('Deleted stack: %s', stack.name)
    return dict(status=u'success')
def send_notifications(self):
    """Announce completion of every update in this push on the bus."""
    self.log.info("Sending notifications")
    try:
        agent = os.getlogin()
    except OSError:
        # No controlling terminal (e.g. when building on koji).
        agent = u"masher"
    for update in self.updates:
        notifications.publish(
            topic=u"update.complete.%s" % update.status,
            msg=dict(update=update, agent=agent),
            force=True,
        )
def send_notifications(self):
    """Publish an ``update.complete.<status>`` message per update."""
    self.log.info('Sending notifications')
    # os.getlogin() raises OSError without a controlling terminal,
    # which happens when building on koji.
    try:
        agent = os.getlogin()
    except OSError:
        agent = u'masher'
    for update in self.updates:
        topic = u'update.complete.%s' % update.status
        payload = dict(update=update, agent=agent)
        notifications.publish(topic=topic, msg=payload, force=True)
def wait_for_sync(self):
    """Block until our repomd.xml hits the master mirror.

    Publishes ``mashtask.sync.wait``, then polls the configured master
    repomd URL every 200 seconds until its sha1 matches the local copy,
    finally publishing ``mashtask.sync.done``.  Returns early (with an
    error logged) if the local repomd.xml cannot be found.

    Fixes: the original leaked the local file handle via
    ``file(repomd).read()`` and never closed the urlopen response.
    """
    self.log.info('Waiting for updates to hit the master mirror')
    notifications.publish(
        topic="mashtask.sync.wait",
        msg=dict(repo=self.id),
        force=True,
    )
    mash_path = os.path.join(self.path, self.id)
    # Any mashed arch will do for the comparison; pick the first one.
    arch = os.listdir(mash_path)[0]

    release = self.release.id_prefix.lower().replace('-', '_')
    request = self.request.value
    key = '%s_%s_master_repomd' % (release, request)
    master_repomd = config.get(key)
    if not master_repomd:
        raise ValueError("Could not find %s in the config file" % key)

    repomd = os.path.join(mash_path, arch, 'repodata', 'repomd.xml')
    if not os.path.exists(repomd):
        self.log.error('Cannot find local repomd: %s', repomd)
        return

    # Hash in binary mode and close the handle promptly.
    with open(repomd, 'rb') as local:
        checksum = hashlib.sha1(local.read()).hexdigest()

    while True:
        try:
            url = master_repomd % (self.release.version, arch)
            self.log.info('Polling %s' % url)
            masterrepomd = urllib2.urlopen(url)
        except (urllib2.URLError, urllib2.HTTPError):
            self.log.exception('Error fetching repomd.xml')
            time.sleep(200)
            continue
        # Read, then close the response so sockets don't pile up while
        # we poll in this (potentially long-running) loop.
        try:
            remote_data = masterrepomd.read()
        finally:
            masterrepomd.close()
        newsum = hashlib.sha1(remote_data).hexdigest()
        if newsum == checksum:
            self.log.info("master repomd.xml matches!")
            notifications.publish(
                topic="mashtask.sync.done",
                msg=dict(repo=self.id),
                force=True,
            )
            return

        self.log.debug("master repomd.xml doesn't match! %s != %s for %r",
                       checksum, newsum, self.id)
        time.sleep(200)
def work(self, msg):
    """Begin the push process.

    Here we organize & prioritize the updates, and fire off separate
    threads for each repo tag being mashed.

    If there are any security updates in the push, then those repositories
    will be executed before all others.
    """
    # Incoming fedmsg payload; assumes msg['body']['msg'] carries the push
    # request body -- TODO confirm against the message schema.
    body = msg['body']['msg']
    resume = body.get('resume', False)
    notifications.publish(topic="mashtask.start", msg=dict(), force=True)
    with self.db_factory() as session:
        releases = self.organize_updates(session, body)
        batches = self.prioritize_updates(releases)
        # Per-thread summary lines accumulated across all batches.
        results = []
        # Important repos first, then normal
        for batch in batches:
            # Stable first, then testing
            for req in ('stable', 'testing'):
                threads = []
                for release, request, updates in batch:
                    if request == req:
                        self.log.info(
                            'Starting thread for %s %s for %d updates',
                            release, request, len(updates))
                        thread = MasherThread(release, request, updates,
                                              self.log, self.db_factory,
                                              self.mash_dir, resume)
                        threads.append(thread)
                        thread.start()
                # Block until every thread in this wave finishes before
                # starting the next request type / batch.
                for thread in threads:
                    thread.join()
                    # thread.results() is defined on MasherThread (not
                    # visible here); presumably yields summary strings.
                    for result in thread.results():
                        results.append(result)
    self.log.info('Push complete! Summary follows:')
    for result in results:
        self.log.info(result)
def eject_from_mash(self, update, reason):
    """Drop *update* from the running push and record why.

    Unlocks and comments on the update, clears its request, removes it
    from this thread's bookkeeping, and publishes an ``update.eject``
    message describing the ejection.
    """
    update.locked = False
    reason_text = '%s ejected from the push because %r' % (update.title,
                                                           reason)
    log.warn(reason_text)
    update.comment(reason_text, author=u'bodhi')
    update.request = None

    # Forget the update in both in-flight trackers.
    for bucket in (self.state['updates'], self.updates):
        if update in bucket:
            bucket.remove(update)

    notifications.publish(
        topic="update.eject",
        msg=dict(
            repo=self.id,
            update=update,
            reason=reason,
            request=self.request,
            release=self.release,
        ),
        force=True,
    )
def save_stack(request):
    """Save a stack"""
    data = request.validated
    db = request.db
    user = User.get(request.user.name, db)

    # Fetch or create the stack
    stack = Stack.get(data['name'], db)
    if not stack:
        stack = Stack(name=data['name'], users=[user])
        db.add(stack)
        db.flush()

    # Enforce ownership: only stack owners or members of an owning group
    # may modify an already-claimed stack.
    if stack.users or stack.groups:
        if user in stack.users:
            log.info('%s is an owner of the %s', user.name, stack.name)
        elif any(group in stack.groups for group in user.groups):
            log.info('%s is a member of the %s group', user.name, stack.name)
        else:
            log.warn('%s is not an owner of the %s stack',
                     user.name, stack.name)
            log.debug('owners = %s; groups = %s', stack.users, stack.groups)
            request.errors.add(
                'body', 'name', '%s does not have privileges'
                ' to modify the %s stack' % (user.name, stack.name))
            request.errors.status = HTTPForbidden.code
            return

    # Update the stack description
    desc = data['description']
    if desc:
        stack.description = desc

    # Update the stack requirements
    # If the user passed in no value at all for requirements, then use
    # the site defaults. If, however, the user passed in the empty string,
    # we assume they mean *really*, no requirements so we leave the value
    # null.
    reqs = data['requirements']
    if reqs is None:
        stack.requirements = request.registry.settings.get(
            'site_requirements')
    elif reqs:
        stack.requirements = reqs

    stack.update_relationship('users', User, data, db)
    stack.update_relationship('groups', Group, data, db)

    # We make a special case out of packages here, since when a package is
    # added to a stack, we want to give it the same requirements as the
    # stack has. See https://github.com/fedora-infra/bodhi/issues/101
    new, same, rem = stack.update_relationship('packages', Package, data, db)
    if stack.requirements:
        additional = list(tokenize(stack.requirements))
        for name in new:
            package = Package.get(name, db)
            existing = (list(tokenize(package.requirements))
                        if package.requirements else [])
            package.requirements = " ".join(set(existing + additional))

    log.info('Saved %s stack', data['name'])
    notifications.publish(topic='stack.save',
                          msg=dict(stack=stack, agent=user.name))
    return dict(stack=stack)
def work(self):
    """Run the full mash lifecycle for one release/request pair.

    Resolves the release and repo tag, optionally resumes a previous
    state, tags/untags builds, mashes the repo (unless skipped), waits
    for mirror sync, and sends all notifications/comments/emails.
    Always calls self.finish() with the final success flag.
    """
    self.koji = buildsys.get_session()
    # self.release starts as a name string and is swapped for the model
    # object here; .one() raises if the release is unknown.
    self.release = self.db.query(Release)\
        .filter_by(name=self.release).one()
    # Repo tag for this request, e.g. the release's stable/testing tag.
    self.id = getattr(self.release, '%s_tag' % self.request.value)

    # Set our thread's "name" so it shows up nicely in the logs.
    # https://docs.python.org/2/library/threading.html#thread-objects
    self.name = self.id

    # For 'pending' branched releases, we only want to perform repo-related
    # tasks for testing updates. For stable updates, we should just add the
    # dist_tag and do everything else other than mashing/updateinfo, since
    # the nightly build-branched cron job mashes for us.
    self.skip_mash = False
    if (self.release.state is ReleaseState.pending
            and self.request is UpdateRequest.stable):
        self.skip_mash = True

    self.log.info('Running MasherThread(%s)' % self.id)
    self.init_state()
    if not self.resume:
        self.init_path()

    notifications.publish(
        topic="mashtask.mashing",
        msg=dict(repo=self.id, updates=self.state['updates']),
        force=True,
    )

    try:
        # Resume from a previously saved push, or checkpoint this one.
        if self.resume:
            self.load_state()
        else:
            self.save_state()

        self.load_updates()
        self.verify_updates()

        if self.request is UpdateRequest.stable:
            self.perform_gating()

        self.determine_and_perform_tag_actions()
        self.update_security_bugs()
        self.expire_buildroot_overrides()
        self.remove_pending_tags()
        self.update_comps()

        if self.resume and self.path in self.state['completed_repos']:
            # This repo was already mashed in the run we are resuming.
            self.log.info('Skipping completed repo: %s', self.path)
            self.complete_requests()
            # We still need to generate the testing digest, since it's
            # stored in memory
            self.generate_testing_digest()
        else:
            if not self.skip_mash:
                mash_thread = self.mash()

            # Things we can do while we're mashing
            self.complete_requests()
            self.generate_testing_digest()

            if not self.skip_mash:
                uinfo = self.generate_updateinfo()
                # Mash must finish before updateinfo can be injected.
                self.wait_for_mash(mash_thread)
                uinfo.insert_updateinfo()
                uinfo.insert_pkgtags()
                uinfo.cache_repodata()

            # Compose OSTrees from our freshly mashed repos
            if config.get('compose_atomic_trees'):
                self.compose_atomic_trees()

            if not self.skip_mash:
                self.sanity_check_repo()
                self.stage_repo()

                # Wait for the repo to hit the master mirror
                self.wait_for_sync()

            # NOTE(review): block structure reconstructed from flattened
            # source; confirm these post-mash steps belong to this branch.
            # Send fedmsg notifications
            self.send_notifications()

            # Update bugzillas
            self.modify_bugs()

            # Add comments to updates
            self.status_comments()

            # Announce stable updates to the mailing list
            self.send_stable_announcements()

            # Email updates-testing digest
            self.send_testing_digest()

        self.success = True
        self.remove_state()
        self.unlock_updates()
        self.check_all_karma_thresholds()
    except:
        # Bare except is deliberate here: persist state for any failure
        # (it re-raises, so nothing is swallowed).
        self.log.exception('Exception in MasherThread(%s)' % self.id)
        self.save_state()
        raise
    finally:
        self.finish(self.success)
def save_stack(request):
    """Create or update a stack from the validated request data.

    Creates the stack if it does not exist, verifies the requesting user
    is an owner (or in an owning group), updates description/requirements
    and relationships, propagates stack requirements onto newly added
    packages, and publishes a ``stack.save`` message.
    """
    data = request.validated
    db = request.db
    user = User.get(request.user.name, db)

    # Fetch or create the stack
    stack = Stack.get(data["name"], db)
    if not stack:
        stack = Stack(name=data["name"], users=[user])
        db.add(stack)
        db.flush()

    # Ownership check: an unclaimed stack (no users, no groups) is open
    # to anyone; otherwise the caller must be an owner or in an owning
    # group, or the request is rejected with 403.
    if stack.users or stack.groups:
        if user in stack.users:
            log.info("%s is an owner of the %s", user.name, stack.name)
        else:
            for group in user.groups:
                if group in stack.groups:
                    log.info("%s is a member of the %s group", user.name,
                             stack.name)
                    break
            else:
                # for/else: no group matched, so deny the request.
                log.warn("%s is not an owner of the %s stack",
                         user.name, stack.name)
                log.debug("owners = %s; groups = %s", stack.users,
                          stack.groups)
                request.errors.add(
                    "body", "name", "%s does not have privileges"
                    " to modify the %s stack" % (user.name, stack.name)
                )
                request.errors.status = HTTPForbidden.code
                return

    # Update the stack description
    desc = data["description"]
    if desc:
        stack.description = desc

    # Update the stack requirements
    # If the user passed in no value at all for requirements, then use
    # the site defaults. If, however, the user passed in the empty string,
    # we assume they mean *really*, no requirements so we leave the value
    # null.
    reqs = data["requirements"]
    if reqs is None:
        stack.requirements = request.registry.settings.get(
            "site_requirements")
    elif reqs:
        stack.requirements = reqs

    stack.update_relationship("users", User, data, db)
    stack.update_relationship("groups", Group, data, db)

    # We make a special case out of packages here, since when a package is
    # added to a stack, we want to give it the same requirements as the
    # stack has. See https://github.com/fedora-infra/bodhi/issues/101
    new, same, rem = stack.update_relationship("packages", Package, data, db)
    if stack.requirements:
        additional = list(tokenize(stack.requirements))
        for name in new:
            package = Package.get(name, db)
            original = package.requirements
            original = [] if not original else list(tokenize(original))
            # Merge (deduplicated) stack requirements into the package's
            # own; note set() makes the resulting order unspecified.
            package.requirements = " ".join(list(set(original + additional)))

    log.info("Saved %s stack", data["name"])
    notifications.publish(topic="stack.save",
                          msg=dict(stack=stack, agent=user.name))
    return dict(stack=stack)