def check_package_state(package, prev_state):
    """
    Compare the package's current message state against *prev_state* and
    emit a 'package_state_change' event when they differ.

    :param package: package ORM object providing msg_state_string
    :param prev_state: previously observed state string
    """
    current_state = package.msg_state_string
    if prev_state == current_state:
        # nothing changed -> no event
        return
    dispatch_event(
        'package_state_change',
        package=package,
        prev_state=prev_state,
        new_state=current_state,
    )
def resolve_repo(self, collection, repo_id, sack):
    """
    Resolves given repo base buildroot. Stores buildroot problems if any.
    Updates collection metadata (latest_repo_id, latest_repo_resolved).
    Commits.

    :param sack: sack used for dependency resolution
    :param collection: collection to which the repo belongs
    :param repo_id: numeric id of the koji repo
    """
    self.log.info(
        "Generating new repo (repo_id={}, collection={})".format(
            repo_id, collection.name,
        )
    )
    build_group = self.get_build_group(collection, repo_id)
    # Resolve the build group itself (empty br list) against the sack
    resolved, base_problems, _ = self.resolve_dependencies(sack, [], build_group)
    # Old problems are stale regardless of outcome; drop them first
    self.db.query(BuildrootProblem)\
        .filter_by(collection_id=collection.id)\
        .delete()
    # Capture state before the update so a state-change event can be emitted
    prev_state = collection.state_string
    collection.latest_repo_id = repo_id
    collection.latest_repo_resolved = resolved
    new_state = collection.state_string
    if not resolved:
        self.log.info("Build group not resolvable for {}"
                      .format(collection.name))
        # Bulk-insert the new buildroot problems
        self.db.execute(BuildrootProblem.__table__.insert(),
                        [{'collection_id': collection.id, 'problem': problem}
                         for problem in base_problems])
    self.db.commit()
    # Notify plugins (e.g. message bus) about the possible state change;
    # listeners are expected to compare prev_state and new_state themselves
    dispatch_event('collection_state_change', self.session,
                   collection=collection,
                   prev_state=prev_state, new_state=new_state)
def resolve_repo(self, collection, repo_id, sack):
    """
    Resolves given repo base buildroot. Stores buildroot problems if any.
    Updates collection metadata (latest_repo_id, latest_repo_resolved).
    Commits.

    :param sack: sack used for dependency resolution
    :param collection: collection to which the repo belongs
    :param repo_id: numeric id of the koji repo
    """
    self.log.info(
        "Generating new repo (repo_id={}, collection={})".format(
            repo_id, collection.name,
        )
    )
    build_group = self.get_build_group(collection, repo_id)
    # Resolve only the build group (no extra build requires) against the sack
    resolved, base_problems, _ = self.resolve_dependencies(sack, [], build_group)
    # Remove stale problems before (possibly) inserting fresh ones
    self.db.query(BuildrootProblem)\
        .filter_by(collection_id=collection.id)\
        .delete()
    # Remember the state before the update for the state-change event below
    prev_state = collection.state_string
    collection.latest_repo_id = repo_id
    collection.latest_repo_resolved = resolved
    new_state = collection.state_string
    if not resolved:
        self.log.info("Build group not resolvable for {}"
                      .format(collection.name))
        # Bulk-insert new buildroot problems in a single statement
        self.db.execute(BuildrootProblem.__table__.insert(),
                        [{'collection_id': collection.id, 'problem': problem}
                         for problem in base_problems])
    self.db.commit()
    # Let plugins react (e.g. publish a message) to the possible state change
    dispatch_event('collection_state_change', self.session,
                   collection=collection,
                   prev_state=prev_state, new_state=new_state)
def test_event(self):
    """Package and collection state changes must be published via fedmsg
    with the expected topic and message payload."""
    package = self.prepare_package('rnv')
    self.prepare_group('c', content=['rnv'])
    self.prepare_group('xml', namespace='foo', content=['rnv'])
    with patch('fedmsg.publish') as publish:
        plugin.dispatch_event('package_state_change', self.session,
                              package=package,
                              prev_state='failed', new_state='ok')
        # Package event carries repo/collection metadata plus group membership
        publish.assert_called_once_with(topic='package.state.change',
                                        modname='koschei',
                                        msg={'name': 'rnv',
                                             'old': 'failed',
                                             'new': 'ok',
                                             'repo': 'f25',
                                             'collection': 'f25',
                                             'collection_name': 'Fedora Rawhide',
                                             'koji_instance': 'primary',
                                             'groups': ['c', 'foo/xml']})
        publish.reset_mock()
        plugin.dispatch_event('collection_state_change', self.session,
                              collection=package.collection,
                              prev_state='unresolved', new_state='ok')
        # Collection event carries repo_id instead of package/group info
        publish.assert_called_once_with(topic='collection.state.change',
                                        modname='koschei',
                                        msg={'old': 'unresolved',
                                             'new': 'ok',
                                             'collection': 'f25',
                                             'collection_name': 'Fedora Rawhide',
                                             'repo_id': 123,
                                             'koji_instance': 'primary'})
def test_same_state(self):
    """No fedmsg may be published when the state did not actually change."""
    package = self.prepare_package()
    with patch('fedmsg.publish') as publish_mock:
        plugin.dispatch_event(
            'package_state_change',
            package=package,
            prev_state='ok',
            new_state='ok',
        )
        self.assertFalse(publish_mock.called)
def main(self):
    """Endless fedmsg consumption loop.

    Messages on koschei's own topic prefix are handled by consume();
    every message is additionally dispatched to plugins.
    """
    for _, _, topic, body in fedmsg.tail_messages():
        self.notify_watchdog()
        try:
            is_own_topic = topic.startswith(get_config('fedmsg.topic') + '.')
            if is_own_topic:
                self.consume(topic, body)
            # plugins get every message, whatever the topic
            plugin.dispatch_event('fedmsg_event', self.session, topic, body)
        finally:
            # drop any uncommitted DB state before the next message
            self.db.rollback()
        self.memory_check()
def callback(message):
    # NOTE(review): `self` is captured from the enclosing scope — this
    # function appears to be defined inside a method of the consuming
    # service; confirm against the full file.
    self.notify_watchdog()
    topic = message.topic
    # Wrap the message body in the legacy {'msg': ...} envelope that
    # consume() and the plugins expect
    msg = {'msg': message.body}
    try:
        if topic.startswith(get_config('fedmsg.topic') + '.'):
            self.consume(topic, msg)
        # Plugins receive every message regardless of topic
        plugin.dispatch_event('fedmsg_event', self.session, topic, msg)
    finally:
        # Always discard uncommitted DB state before the next message
        self.db.rollback()
    self.memory_check()
def test_same_state(self):
    """Neither package nor collection events may be published when the
    state did not actually change."""
    package = self.prepare_package('rnv')
    self.prepare_group('c', content=['rnv'])
    self.prepare_group('xml', namespace='foo', content=['rnv'])
    with patch('fedmsg.publish') as publish_mock:
        plugin.dispatch_event(
            'package_state_change',
            self.session,
            package=package,
            prev_state='ok',
            new_state='ok',
        )
        self.assertFalse(publish_mock.called)
        publish_mock.reset_mock()
        plugin.dispatch_event(
            'collection_state_change',
            self.session,
            collection=package.collection,
            prev_state='ok',
            new_state='ok',
        )
        self.assertFalse(publish_mock.called)
def main(self):
    """One polling cycle: refresh build states, packages and latest builds.

    Each phase commits separately so a failure in a later phase does not
    roll back earlier work.
    """
    self.poll_builds()
    self.log.debug('Polling Koji packages...')
    self.backend.refresh_packages()
    self.db.commit()
    # close() releases the connection between long-running phases;
    # the session transparently reopens on next use
    self.db.close()
    plugin.dispatch_event('polling_event', self.backend)
    self.db.commit()
    self.db.close()
    self.log.debug('Polling latest real builds...')
    self.backend.refresh_latest_builds()
    self.db.commit()
    self.log.debug('Polling finished')
def main(self):
    """Consume fedmsg messages until the connection drops, then reinitialize.

    Koschei-topic messages are handled by consume(); every message is also
    dispatched to plugins.
    """
    try:
        for _, _, topic, msg in fedmsg.tail_messages():
            self.notify_watchdog()
            try:
                if topic.startswith(get_config('fedmsg.topic') + '.'):
                    self.consume(topic, msg)
                # plugins receive all messages regardless of topic
                plugin.dispatch_event('fedmsg_event', topic, msg,
                                      db=self.db,
                                      koji_sessions=self.koji_sessions)
            finally:
                # discard any uncommitted DB state before the next message
                self.db.rollback()
    except requests.exceptions.ConnectionError:
        # fedmsg connection broke — log, tear the client down and
        # reinitialize so the caller can restart the loop
        self.log.exception("Fedmsg watcher exception.")
        fedmsg.destroy()
        fedmsg.init()
def main(self):
    """One polling cycle: refresh build states, packages, latest builds
    and materialized statistics views.

    Each phase commits separately so a failure in a later phase does not
    roll back earlier work.
    """
    self.poll_builds()
    self.log.info('Polling Koji packages...')
    backend.refresh_packages(self.session)
    self.db.commit()
    # let plugins (e.g. pkgdb/pagure sync) run their own polling work
    plugin.dispatch_event('polling_event', self.session)
    self.db.commit()
    self.log.info('Polling latest real builds...')
    backend.refresh_latest_builds(self.session)
    self.db.commit()
    self.log.info('Refreshing statistics...')
    self.db.refresh_materialized_view(ResourceConsumptionStats, ScalarStats)
    self.db.commit()
    self.log.info('Polling finished')
def emit_package_state_changes(self, state_changes):
    """
    Emits package state change events for given map as produced by
    check_package_state_changes.

    :param state_changes: dict mapping package id -> (prev_state, new_state)
    """
    if not state_changes:
        return
    # Iterate over the dict directly instead of dict.iterkeys():
    # iterkeys() exists only on Python 2, while `in_` accepts any
    # iterable of values, so this works on both Python versions.
    for package in self.db.query(Package)\
            .filter(Package.id.in_(state_changes))\
            .options(joinedload(Package.groups),
                     joinedload(Package.collection)):
        prev_state, new_state = state_changes[package.id]
        dispatch_event('package_state_change', package=package,
                       prev_state=prev_state, new_state=new_state)
def test_event(self):
    """A package state change must be published via fedmsg with the
    expected topic and payload (including group membership)."""
    package = self.prepare_package()
    with patch('fedmsg.publish') as publish:
        plugin.dispatch_event('package_state_change', package=package,
                              prev_state='failed', new_state='ok')
        publish.assert_called_once_with(topic='package.state.change',
                                        modname='koschei',
                                        msg={'name': 'rnv',
                                             'old': 'failed',
                                             'new': 'ok',
                                             'repo': 'tag',
                                             'collection': 'foo',
                                             'collection_name': 'Foo',
                                             'koji_instance': 'primary',
                                             'groups': ['c', 'foo/xml']})
def test_get_my_packages(self):
    """The user-packages plugin hook must report 'rnv' for this user."""
    collected = []
    for chunk in plugin.dispatch_event('get_user_packages', self.session,
                                       username='******'):
        collected.extend(chunk)
    self.assertIn('rnv', collected)
def emit_package_state_changes(self, state_changes):
    """
    Emits package state change events for given map as produced by
    check_package_state_changes.

    :param state_changes: dict mapping package id -> (prev_state, new_state)
    """
    if not state_changes:
        return
    # dict.iterkeys() is Python-2-only; `in_` accepts any iterable,
    # so passing the dict itself is portable and equivalent.
    for package in self.db.query(Package)\
            .filter(Package.id.in_(state_changes))\
            .options(joinedload(Package.groups),
                     joinedload(Package.collection)):
        prev_state, new_state = state_changes[package.id]
        dispatch_event('package_state_change', package=package,
                       prev_state=prev_state, new_state=new_state)
def test_get_my_packages(self):
    """The user-packages plugin hook must report 'rnv' for this user."""
    names = []
    event_results = plugin.dispatch_event('get_user_packages', self.session,
                                          username='******')
    for partial in event_results:
        names += partial
    self.assertIn('rnv', names)
def test_event(self):
    """A package state change must be published via fedmsg with the
    expected topic and payload."""
    package = self.prepare_package()
    with patch('fedmsg.publish') as publish:
        plugin.dispatch_event('package_state_change', package=package,
                              prev_state='failed', new_state='ok')
        publish.assert_called_once_with(topic='package.state.change',
                                        modname='koschei',
                                        msg={
                                            'name': 'rnv',
                                            'old': 'failed',
                                            'new': 'ok',
                                            'repo': 'f22',
                                            'koji_instance': 'primary',
                                            'groups': ['c', 'foo/xml']
                                        })
def update_build_state(self, build, state):
    """
    Updates state of the build in db to new state (Koji state name).
    Deletes canceled builds. Sends fedmsg when the build is complete.
    Commits the transaction.

    :param build: Build ORM object to update
    :param state: Koji task state name (e.g. 'CLOSED', 'FAILED', 'CANCELED')
    """
    if state in Build.KOJI_STATE_MAP:
        # The task reached a terminal state — translate to our own constant
        state = Build.KOJI_STATE_MAP[state]
        # Read ids before expiring; expire_all() below forces re-fetch of
        # rows so that the locked rows are fresh, not SQLA-cached copies
        build_id = build.id
        package_id = build.package_id
        self.db.expire_all()
        # lock package so there are no concurrent state changes
        package = self.db.query(Package).filter_by(id=package_id)\
            .with_lockmode('update').one()
        # lock build
        build = self.db.query(Build).filter_by(id=build_id)\
            .with_lockmode('update').first()
        if not build or build.state == state:
            # other process did the job already
            self.db.rollback()
            return
        if state == Build.CANCELED:
            self.log.info(
                'Deleting build {0} because it was canceled'.format(build))
            self.db.delete(build)
            self.db.commit()
            return
        assert state in (Build.COMPLETE, Build.FAILED)
        self.log.info('Setting build {build} state to {state}'.format(
            build=build, state=Build.REV_STATE_MAP[state]))
        # fetch subtasks/repo info now that the build is finished
        self.sync_tasks(build, complete=True)
        self.db.expire(build.package)
        # capture state before the update for the state-change event
        prev_state = package.msg_state_string
        build.state = state
        # unlock
        self.db.commit()
        new_state = package.msg_state_string
        if prev_state != new_state:
            dispatch_event('package_state_change', package=package,
                           prev_state=prev_state, new_state=new_state)
    else:
        # Build still running — just refresh task info
        self.sync_tasks(build)
        self.db.commit()
def main(self):
    """Consume fedmsg messages until the connection drops, then reinitialize.

    Koschei-topic messages are handled by consume(); every message is also
    dispatched to plugins.
    """
    try:
        for _, _, topic, msg in fedmsg.tail_messages():
            self.notify_watchdog()
            try:
                if topic.startswith(get_config('fedmsg.topic') + '.'):
                    self.consume(topic, msg)
                # plugins receive all messages regardless of topic
                plugin.dispatch_event('fedmsg_event', topic, msg,
                                      db=self.db,
                                      koji_sessions=self.koji_sessions)
            finally:
                # discard any uncommitted DB state before the next message
                self.db.rollback()
    except requests.exceptions.ConnectionError:
        # fedmsg connection broke — log and recreate the fedmsg client so
        # the caller can restart the loop
        self.log.exception("Fedmsg watcher exception.")
        fedmsg.destroy()
        fedmsg.init()
def test_same_state(self):
    """No message may be published when neither the package nor the
    collection state actually changed."""
    package = self.prepare_package('rnv')
    self.prepare_group('c', content=['rnv'])
    self.prepare_group('xml', namespace='foo', content=['rnv'])
    with patch('fedora_messaging.api.publish') as publish_mock:
        plugin.dispatch_event(
            'package_state_change',
            self.session,
            package=package,
            prev_state='ok',
            new_state='ok',
        )
        self.assertFalse(publish_mock.called)
        publish_mock.reset_mock()
        plugin.dispatch_event(
            'collection_state_change',
            self.session,
            collection=package.collection,
            prev_state='ok',
            new_state='ok',
        )
        self.assertFalse(publish_mock.called)
def test_event(self):
    """Package and collection state changes must be published via fedmsg
    with the expected topic and message payload."""
    package = self.prepare_package('rnv')
    self.prepare_group('c', content=['rnv'])
    self.prepare_group('xml', namespace='foo', content=['rnv'])
    with patch('fedmsg.publish') as publish:
        plugin.dispatch_event('package_state_change', self.session,
                              package=package,
                              prev_state='failed', new_state='ok')
        # Package event carries repo/collection metadata plus groups
        publish.assert_called_once_with(topic='package.state.change',
                                        modname='koschei',
                                        msg={
                                            'name': 'rnv',
                                            'old': 'failed',
                                            'new': 'ok',
                                            'repo': 'f25',
                                            'collection': 'f25',
                                            'collection_name': 'Fedora Rawhide',
                                            'koji_instance': 'primary',
                                            'groups': ['c', 'foo/xml']
                                        })
        publish.reset_mock()
        plugin.dispatch_event('collection_state_change', self.session,
                              collection=package.collection,
                              prev_state='unresolved', new_state='ok')
        # Collection event carries repo_id instead of package/group info
        publish.assert_called_once_with(topic='collection.state.change',
                                        modname='koschei',
                                        msg={
                                            'old': 'unresolved',
                                            'new': 'ok',
                                            'collection': 'f25',
                                            'collection_name': 'Fedora Rawhide',
                                            'repo_id': 123,
                                            'koji_instance': 'primary'
                                        })
def execute(self, session, older_than):
    """
    Deletes builds older than *older_than* months — except builds still
    referenced as a package's last build or last complete build — and
    resolution changes older than the same threshold. Logs the numbers of
    deleted rows and lets plugins run their own cleanup.

    :param session: admin session providing db access and action logging
    :param older_than: age threshold in months; must be at least 2
    """
    if older_than < 2:
        sys.exit("Minimal allowed value is 2 months")
    # NOTE(review): older_than is interpolated into the SQL via format();
    # this is safe only because the numeric comparison above rejects
    # non-numeric values — keep that check in place.
    build_res = session.db.execute("""
        DELETE FROM build
        WHERE started < now() - '{months} month'::interval
              AND id NOT IN (
                  SELECT last_build_id AS id
                  FROM package
                  WHERE last_build_id IS NOT null
                  UNION
                  SELECT last_complete_build_id
                  FROM package
                  WHERE last_complete_build_id IS NOT null
              )
    """.format(months=older_than))
    resolution_res = session.db.execute("""
        DELETE FROM resolution_change
        WHERE "timestamp" < now() - '{months} month':: interval
    """.format(months=older_than))
    session.log_user_action("Cleanup: Deleted {} builds".format(
        build_res.rowcount))
    session.log_user_action(
        "Cleanup: Deleted {} resolution changes".format(
            resolution_res.rowcount))
    # allow plugins (e.g. copr) to clean their own data
    plugin.dispatch_event('cleanup', session, older_than)
def user_packages(name):
    """Package-list view showing the packages owned by *name*;
    ownership is determined by plugins via the 'get_user_packages' event."""
    owned = []
    try:
        for contribution in plugin.dispatch_event('get_user_packages',
                                                  username=name):
            if contribution:
                owned.extend(contribution)
    except Exception:
        flash("Error retrieving user's packages")
        log.exception("Error retrieving user's packages")

    def query_fn(query):
        return query.filter(Package.name.in_(owned))

    return package_view("user-packages.html", query_fn, username=name)
def user_packages(name):
    """Render the package-list page restricted to packages that plugins
    report as belonging to the given user."""
    package_names = []
    try:
        plugin_results = plugin.dispatch_event('get_user_packages',
                                               username=name)
        for partial in plugin_results:
            if partial:
                package_names += partial
    except Exception:
        flash("Error retrieving user's packages")
        log.exception("Error retrieving user's packages")

    def query_fn(query):
        return query.filter(Package.name.in_(package_names))

    return package_view("user-packages.html", query_fn, username=name)
def execute(self, session, older_than):
    """
    Deletes builds older than *older_than* months — except builds still
    referenced as a package's last build or last complete build — and
    resolution changes older than the same threshold. Logs the numbers of
    deleted rows and lets plugins run their own cleanup.

    :param session: admin session providing db access and action logging
    :param older_than: age threshold in months; must be at least 2
    """
    if older_than < 2:
        sys.exit("Minimal allowed value is 2 months")
    # NOTE(review): older_than is interpolated into the SQL via format();
    # safe only because the numeric check above rejects non-numbers.
    build_res = session.db.execute("""
        DELETE FROM build
        WHERE started < now() - '{months} month'::interval
              AND id NOT IN (
                  SELECT last_build_id AS id
                  FROM package
                  WHERE last_build_id IS NOT null
                  UNION
                  SELECT last_complete_build_id
                  FROM package
                  WHERE last_complete_build_id IS NOT null
              )
    """.format(months=older_than))
    resolution_res = session.db.execute("""
        DELETE FROM resolution_change
        WHERE "timestamp" < now() - '{months} month':: interval
    """.format(months=older_than))
    session.log_user_action(
        "Cleanup: Deleted {} builds".format(build_res.rowcount)
    )
    session.log_user_action(
        "Cleanup: Deleted {} resolution changes".format(resolution_res.rowcount)
    )
    # allow plugins to clean their own data
    plugin.dispatch_event('cleanup', session, older_than)
def user_packages(username):
    """
    View listing packages that belong to a user. What "belongs" means is
    decided by plugins — in Fedora the pagure plugin asks pagure which
    packages the user maintains.
    """
    package_names = []
    try:
        for plugin_result in plugin.dispatch_event('get_user_packages',
                                                   session,
                                                   username=username):
            if plugin_result:
                package_names += plugin_result
    except Exception:
        flash_nak("Error retrieving user's packages")
        session.log.exception("Error retrieving user's packages")

    def query_fn(query):
        # an empty name list must match nothing, not everything
        if package_names:
            return query.filter(BasePackage.name.in_(package_names))
        return query.filter(false())

    return package_view("user-packages.html", query_fn, username=username)
def user_packages(username):
    """
    Displays the packages of a given user. Ownership is plugin-defined;
    in Fedora the pagure plugin queries pagure for the packages the user
    maintains.
    """
    owned_names = []
    try:
        event_results = plugin.dispatch_event('get_user_packages', session,
                                              username=username)
        for chunk in event_results:
            if chunk:
                owned_names.extend(chunk)
    except Exception:
        flash_nak("Error retrieving user's packages")
        session.log.exception("Error retrieving user's packages")

    def query_fn(query):
        # empty result set must match nothing — filter on false() then
        condition = (BasePackage.name.in_(owned_names)
                     if owned_names else false())
        return query.filter(condition)

    return package_view("user-packages.html", query_fn, username=username)
def persist_resolution_output(self, chunk):
    """
    Stores resolution output into the database and sends fedmsg if needed.

    chunk format:
    [
        ResolutionOutput(
            package=Package(...),
            prev_resolved=False,
            resolved=True,  # current resolution status
            changes=[dict(...), ...],  # dependency changes in dict form
            problems={dict(...), ...},  # dependency problems in dict form,
                                        # note it's a set
            last_build_id=456,  # used to detect concurrently inserted builds
        ),
    ...]
    """
    if not chunk:
        return

    package_ids = [p.package.id for p in chunk]

    # expire packages, so that we get the packages we locked, not old
    # version in sqla cache
    for p in chunk:
        self.db.expire(p.package)

    # lock the packages to be updated
    (
        self.db.query(Package.id)
        .filter(Package.id.in_(package_ids))
        .order_by(Package.id)  # ordering to prevent deadlocks
        .with_for_update()
        .all()
    )

    # find latest resolution problems to be compared for change
    previous_problems = {
        r.package_id: set(p.problem for p in r.problems)
        for r in self.db.query(ResolutionChange)
        .filter(ResolutionChange.package_id.in_(package_ids))
        .options(joinedload(ResolutionChange.problems))
        .order_by(ResolutionChange.package_id,
                  ResolutionChange.timestamp.desc())
        .distinct(ResolutionChange.package_id)
        .all()
    }

    # dependency problems to be persisted
    # format: [tuple(resolution change (orm objects), problems (strings))]
    problem_entries = []
    # dependency changes to be persisted
    dependency_changes = []

    # state changes for fedmsg. Message sending should be done after commit
    # format: a dict from id -> (prev_state: string, new_state: string)
    state_changes = {}

    update_weight = get_config('priorities.package_update')

    # update packages, queue resolution results, changes and problems
    # for insertion
    for pkg_result in chunk:
        package = pkg_result.package

        if pkg_result.last_build_id != package.last_build_id:
            # there was a build submitted/registered in the meantime,
            # our results are likely outdated -> discard them
            continue

        # get state before update
        prev_state = package.msg_state_string
        package.resolved = pkg_result.resolved
        # get state after update
        new_state = package.msg_state_string

        # compute dependency priority (closer changes weigh more;
        # missing distance is treated as 8)
        package.dependency_priority = int(
            sum(
                update_weight / (change['distance'] or 8)
                for change in pkg_result.changes
            )
        )

        if prev_state != new_state:
            # queue for fedmsg sending after commit
            state_changes[package.id] = prev_state, new_state

        dependency_changes += pkg_result.changes

        # compare whether there was any change from the previous state
        # - we should emit a new resolution change only if the resolution
        # state or the set of dependency problems changed
        if (
                pkg_result.prev_resolved != pkg_result.resolved or (
                    pkg_result.resolved is False and
                    pkg_result.prev_resolved is False and
                    # both are sets, they can be compared directly
                    pkg_result.problems != previous_problems.get(package.id)
                )
        ):
            resolution_change = ResolutionChange(
                package_id=package.id,
                resolved=pkg_result.resolved,
            )
            self.db.add(resolution_change)
            problem_entries.append((resolution_change, pkg_result.problems))

    # populate resolution changes' ids
    self.db.flush()

    # set problem resolution_ids and prepare dict form
    to_insert = [
        dict(resolution_id=resolution_change.id, problem=problem)
        for resolution_change, problems in problem_entries
        for problem in problems
    ]

    # insert dependency problems
    if to_insert:
        self.db.execute(insert(ResolutionProblem, to_insert))

    # delete old dependency changes, they'll be replaced with new ones
    self.db.query(UnappliedChange)\
        .filter(UnappliedChange.package_id.in_(package_ids))\
        .delete()

    # insert dependency changes
    if dependency_changes:
        self.db.execute(insert(UnappliedChange, dependency_changes))

    # commit without expiring, the packages are needed below for fedmsg
    self.db.commit_no_expire()

    # emit fedmsg (if enabled)
    if state_changes:
        for package in self.db.query(Package)\
                .filter(Package.id.in_(state_changes))\
                .options(joinedload(Package.groups),
                         joinedload(Package.collection)):
            prev_state, new_state = state_changes[package.id]
            dispatch_event(
                'package_state_change',
                self.session,
                package=package,
                prev_state=prev_state,
                new_state=new_state,
            )
def update_build_state(self, build, state):
    """
    Updates state of the build in db to new state (Koji state name).
    Cancels builds running too long. Deletes canceled builds.
    Sends fedmsg when the build is complete. Commits the transaction.

    :param build: Build ORM object to update
    :param state: Koji task state name (e.g. 'CLOSED', 'FAILED')
    """
    try:
        task_timeout = timedelta(0, get_config('koji_config.task_timeout'))
        time_threshold = datetime.now() - task_timeout
        # Cancel builds that were running for too long or were requested
        # to be canceled
        if (state not in Build.KOJI_STATE_MAP and
                (build.started and build.started < time_threshold or
                 build.cancel_requested)):
            self.log.info('Canceling build {0}'.format(build))
            try:
                self.koji_sessions['primary'].cancelTask(build.task_id)
            except koji.GenericError:
                # task may already be finished/gone; treat as canceled anyway
                pass
            state = 'CANCELED'
        if state in Build.KOJI_STATE_MAP:
            # Terminal state — translate to our own constant
            state = Build.KOJI_STATE_MAP[state]
            # Read ids before expiring; expire_all() forces fresh fetches of
            # the rows we are about to lock
            build_id = build.id
            package_id = build.package_id
            self.db.expire_all()
            # lock build
            build = self.db.query(Build).filter_by(id=build_id)\
                .with_lockmode('update').first()
            if not build or build.state == state:
                # other process did the job already
                self.db.rollback()
                return
            if state == Build.CANCELED:
                self.log.info('Deleting build {0} because it was canceled'
                              .format(build))
                self.db.delete(build)
                self.db.commit()
                return
            assert state in (Build.COMPLETE, Build.FAILED)
            # Builds that failed due to Koji itself are false positives —
            # drop them instead of reporting failure to packagers
            if koji_util.is_koji_fault(self.koji_sessions['primary'],
                                       build.task_id):
                self.log.info('Deleting build {0} because it ended with Koji fault'
                              .format(build))
                self.db.delete(build)
                self.db.commit()
                return
            self.log.info('Setting build {build} state to {state}'
                          .format(build=build,
                                  state=Build.REV_STATE_MAP[state]))
            tasks = self.sync_tasks([build], self.koji_sessions['primary'])
            if build.repo_id is None:
                # Koji problem, no need to bother packagers with this
                self.log.info('Deleting build {0} because it has no repo_id'
                              .format(build))
                self.db.delete(build)
                self.db.commit()
                return
            self.insert_koji_tasks(tasks)
            self.db.expire(build.package)
            # lock package so there are no concurrent state changes
            package = self.db.query(Package).filter_by(id=package_id)\
                .with_lockmode('update').one()
            # capture state before the build-state update for the event
            prev_state = package.msg_state_string
            build.state = state
            self.db.flush()
            # re-fetch package so it reflects DB-side updates
            self.db.expire(package)
            new_state = package.msg_state_string
            # unlock
            self.db.commit()
            if prev_state != new_state:
                dispatch_event('package_state_change', package=package,
                               prev_state=prev_state, new_state=new_state)
        else:
            # Build still running — just refresh task info
            tasks = self.sync_tasks([build], self.koji_sessions['primary'])
            self.insert_koji_tasks(tasks)
            self.db.commit()
    except (StaleDataError, ObjectDeletedError, IntegrityError):
        # build was deleted concurrently
        self.db.rollback()
def check_package_state(package, prev_state):
    """
    Emit a 'package_state_change' event if the package's message state
    differs from *prev_state*; otherwise do nothing.

    :param package: package ORM object providing msg_state_string
    :param prev_state: previously observed state string
    """
    state_now = package.msg_state_string
    if prev_state != state_now:
        dispatch_event(
            'package_state_change',
            package=package,
            prev_state=prev_state,
            new_state=state_now,
        )
def update_build_state(session, build, task_state):
    """
    Updates state of the build in db to new state (Koji state name).
    Cancels builds running too long. Deletes canceled builds.
    Sends fedmsg when the build is complete. Commits the transaction.

    :param session: koschei session providing db, log and koji access
    :param build: Build ORM object to update
    :param task_state: Koji task state name (e.g. 'CLOSED', 'FAILED')
    """
    # pylint: disable=too-many-statements
    try:
        task_timeout = timedelta(0, get_config('koji_config.task_timeout'))
        time_threshold = datetime.now() - task_timeout
        canceled = task_state == 'CANCELED'
        # Cancel builds that were running for too long. Prevents starvations
        # of resources as Koji would sometimes keep builds running for weeks
        # without complaining.
        if (not canceled and task_state not in Build.KOJI_STATE_MAP and
                (build.started and build.started < time_threshold or
                 build.cancel_requested)):
            session.log.info('Canceling build {0}'.format(build))
            try:
                session.koji('primary').cancelTask(build.task_id)
            except koji.GenericError:
                # the task may already be gone; treat it as canceled anyway
                pass
            canceled = True
        if canceled or task_state in Build.KOJI_STATE_MAP:
            # The build has finished.
            build_state = Build.KOJI_STATE_MAP.get(task_state)
            # We need to lock build to prevent duplicate inserts and fedmsg.
            # It is necessary to be careful here. We need to get the row we
            # lock, but SQLA "caches" rows and would happily return stale data
            # despite the lock. We need to expire the objects, so that they're
            # fetched anew. We also need to avoid accessing any property
            # (including ids) of the objects before the locking, otherwise the
            # fetch would occur prematurely and there would be the same race
            # condition we tried to avoid using the expiration.
            build_id = build.id
            package_id = build.package_id
            session.db.expire_all()
            build = session.db.query(Build).filter_by(id=build_id)\
                .with_lockmode('update').first()
            if not build or build.state == build_state:
                # Another process did the job already in parallel,
                # nothing to do
                session.db.rollback()
                return
            if canceled:
                session.log.info(
                    'Deleting build {0} because it was canceled'.format(build))
                session.db.delete(build)
                session.db.commit()
                return
            assert build_state in (Build.COMPLETE, Build.FAILED)
            # Detect if the build ended with a "Koji fault". A "Koji fault"
            # is an exception that occured due to Koji itself, like faulty
            # network
            if koji_util.is_koji_fault(session.koji('primary'), build.task_id):
                # Delete such build, it is most likely a false positive
                session.log.info(
                    'Deleting build {0} because it ended with Koji fault'
                    .format(build))
                session.db.delete(build)
                session.db.commit()
                return
            session.log.info('Setting build {build} state to {state}'.format(
                build=build, state=Build.REV_STATE_MAP[build_state]))
            # Get buildArch subtasks and repo_id
            tasks = sync_tasks(session, build.package.collection, [build])
            if build.repo_id is None:
                # This shouldn't normally happen. May be Koji problem.
                # We cannot resolve build with no repo_id, so it's better to
                # throw it away
                session.log.info(
                    'Deleting build {0} because it has no repo_id'
                    .format(build))
                session.db.delete(build)
                session.db.commit()
                return
            # Persist the tasks
            insert_koji_tasks(session, tasks)
            # Lock package, so there are no concurrent state changes.
            # Again, as with the previous locking, we need to be careful
            # with property access and expiration
            session.db.expire(build.package)
            # To prevent deadlocks, locking order always needs to be
            # "build, then package"
            package = session.db.query(Package).filter_by(id=package_id)\
                .with_lockmode('update').one()
            # Reset priorities, delete UnappliedChanges
            clear_priority_data(session, [package])
            # Acquire previous state, we need it for the previous state field
            # of fedmsg. This needs to be done *before* updating the build
            # state.
            prev_state = package.msg_state_string
            build.state = build_state
            # Bump build_priority if needed (most likely not)
            set_failed_build_priority(session, package, build)
            session.db.flush()
            # Re-fetch package so it has fields updated by triggers
            session.db.expire(package)
            new_state = package.msg_state_string
            # Unlock both build and package
            session.db.commit()
            if prev_state != new_state:
                # Send fedmsg if there was a change
                dispatch_event(
                    'package_state_change',
                    session=session,
                    package=package,
                    prev_state=prev_state,
                    new_state=new_state,
                )
        else:
            # The build is still running, but we can at least get the
            # buildArch tasks and repo_id, so that resolver can already
            # resolve its dependencies
            tasks = sync_tasks(session, build.package.collection, [build])
            insert_koji_tasks(session, tasks)
            session.db.commit()
    except (StaleDataError, ObjectDeletedError, IntegrityError):
        # Build was deleted concurrently by another process, nothing to do
        session.db.rollback()
def update_build_state(session, build, task_state):
    """
    Updates state of the build in db to new state (Koji state name).
    Cancels builds running too long. Deletes canceled builds.
    Sends fedmsg when the build is complete. Commits the transaction.

    :param session: koschei session providing db, log and koji access
    :param build: Build ORM object to update
    :param task_state: Koji task state name (e.g. 'CLOSED', 'FAILED')
    """
    # pylint: disable=too-many-statements
    try:
        task_timeout = timedelta(0, get_config('koji_config.task_timeout'))
        time_threshold = datetime.now() - task_timeout
        canceled = task_state == 'CANCELED'
        # Cancel builds that were running for too long. Prevents starvations
        # of resources as Koji would sometimes keep builds running for weeks
        # without complaining.
        if (not canceled and task_state not in Build.KOJI_STATE_MAP and
                (build.started and build.started < time_threshold or
                 build.cancel_requested)):
            session.log.info('Canceling build {0}'.format(build))
            try:
                session.koji('primary').cancelTask(build.task_id)
            except koji.GenericError:
                # the task may already be gone; treat it as canceled anyway
                pass
            canceled = True
        if canceled or task_state in Build.KOJI_STATE_MAP:
            # The build has finished.
            build_state = Build.KOJI_STATE_MAP.get(task_state)
            # We need to lock build to prevent duplicate inserts and fedmsg.
            # It is necessary to be careful here. We need to get the row we
            # lock, but SQLA "caches" rows and would happily return stale data
            # despite the lock. We need to expire the objects, so that they're
            # fetched anew. We also need to avoid accessing any property
            # (including ids) of the objects before the locking, otherwise the
            # fetch would occur prematurely and there would be the same race
            # condition we tried to avoid using the expiration.
            build_id = build.id
            package_id = build.package_id
            session.db.expire_all()
            build = session.db.query(Build).filter_by(id=build_id)\
                .with_lockmode('update').first()
            if not build or build.state == build_state:
                # Another process did the job already in parallel,
                # nothing to do
                session.db.rollback()
                return
            if canceled:
                session.log.info('Deleting build {0} because it was canceled'
                                 .format(build))
                session.db.delete(build)
                session.db.commit()
                return
            assert build_state in (Build.COMPLETE, Build.FAILED)
            # Detect if the build ended with a "Koji fault". A "Koji fault"
            # is an exception that occured due to Koji itself, like faulty
            # network
            if koji_util.is_koji_fault(session.koji('primary'), build.task_id):
                # Delete such build, it is most likely a false positive
                session.log.info('Deleting build {0} because it ended with Koji fault'
                                 .format(build))
                session.db.delete(build)
                session.db.commit()
                return
            session.log.info('Setting build {build} state to {state}'
                             .format(build=build,
                                     state=Build.REV_STATE_MAP[build_state]))
            # Get buildArch subtasks and repo_id
            tasks = sync_tasks(session, build.package.collection, [build])
            if build.repo_id is None:
                # This shouldn't normally happen. May be Koji problem.
                # We cannot resolve build with no repo_id, so it's better to
                # throw it away
                session.log.info('Deleting build {0} because it has no repo_id'
                                 .format(build))
                session.db.delete(build)
                session.db.commit()
                return
            # Persist the tasks
            insert_koji_tasks(session, tasks)
            # Lock package, so there are no concurrent state changes.
            # Again, as with the previous locking, we need to be careful
            # with property access and expiration
            session.db.expire(build.package)
            # To prevent deadlocks, locking order always needs to be
            # "build, then package"
            package = session.db.query(Package).filter_by(id=package_id)\
                .with_lockmode('update').one()
            # Reset priorities, delete UnappliedChanges
            clear_priority_data(session, [package])
            # Acquire previous state, we need it for the previous state field
            # of fedmsg. This needs to be done *before* updating the build
            # state.
            prev_state = package.msg_state_string
            build.state = build_state
            # Bump build_priority if needed (most likely not)
            set_failed_build_priority(session, package, build)
            session.db.flush()
            # Re-fetch package so it has fields updated by triggers
            session.db.expire(package)
            new_state = package.msg_state_string
            # Unlock both build and package
            session.db.commit()
            if prev_state != new_state:
                # Send fedmsg if there was a change
                dispatch_event(
                    'package_state_change',
                    session=session,
                    package=package,
                    prev_state=prev_state,
                    new_state=new_state,
                )
        else:
            # The build is still running, but we can at least get the
            # buildArch tasks and repo_id, so that resolver can already
            # resolve its dependencies
            tasks = sync_tasks(session, build.package.collection, [build])
            insert_koji_tasks(session, tasks)
            session.db.commit()
    except (StaleDataError, ObjectDeletedError, IntegrityError):
        # Build was deleted concurrently by another process, nothing to do
        session.db.rollback()
def persist_resolution_output(self, chunk):
    """
    Stores resolution output into the database and sends fedmsg if needed.

    Takes and releases row locks on the affected packages; commits the
    transaction (via commit_no_expire) before any fedmsg is emitted, so
    listeners never observe uncommitted state.

    :param: chunk list of ResolutionOutput tuples, one per package.
    chunk format:
    [
        ResolutionOutput(
            package=Package(...),
            prev_resolved=False,
            resolved=True,  # current resolution status
            changes=[dict(...), ...],  # dependency changes in dict form
            problems={dict(...), ...},  # dependency problems in dict form,
                                        # note it's a set
            last_build_id=456,  # used to detect concurrently inserted builds
        ),
    ...]
    """
    # nothing to do for an empty chunk
    if not chunk:
        return

    package_ids = [p.package.id for p in chunk]

    # expire packages, so that we get the packages we locked, not old
    # version in sqla cache
    for p in chunk:
        self.db.expire(p.package)

    # lock the packages to be updated
    (
        self.db.query(Package.id)
        .filter(Package.id.in_(package_ids))
        .order_by(Package.id)  # ordering to prevent deadlocks
        .with_lockmode('update')
        .all()
    )

    # find latest resolution problems to be compared for change
    # (DISTINCT ON package_id + timestamp DESC picks the newest
    # ResolutionChange row per package)
    previous_problems = {
        r.package_id: set(p.problem for p in r.problems)
        for r in self.db.query(ResolutionChange)
        .filter(ResolutionChange.package_id.in_(package_ids))
        .options(joinedload(ResolutionChange.problems))
        .order_by(ResolutionChange.package_id,
                  ResolutionChange.timestamp.desc())
        .distinct(ResolutionChange.package_id)
        .all()
    }

    # dependency problems to be persisted
    # format: [tuple(resolution change (orm objects), problems (strings))]
    problem_entries = []
    # dependency changes to be persisted
    dependency_changes = []

    # state changes for fedmsg. Message sending should be done after commit
    # format: a dict from id -> (prev_state: string, new_state: string)
    state_changes = {}

    update_weight = get_config('priorities.package_update')

    # update packages, queue resolution results, changes and problems
    # for insertion
    for pkg_result in chunk:
        package = pkg_result.package

        if pkg_result.last_build_id != package.last_build_id:
            # there was a build submitted/registered in the meantime,
            # our results are likely outdated -> discard them
            continue

        # get state before update
        prev_state = package.msg_state_string
        package.resolved = pkg_result.resolved
        # get state after update
        new_state = package.msg_state_string

        # compute dependency priority; changes with unknown distance
        # (falsy 'distance') are weighted as if at distance 8
        package.dependency_priority = int(
            sum(
                update_weight / (change['distance'] or 8)
                for change in pkg_result.changes
            )
        )

        if prev_state != new_state:
            # queue for fedmsg sending after commit
            state_changes[package.id] = prev_state, new_state

        dependency_changes += pkg_result.changes

        # compare whether there was any change from the previous state
        # - we should emit a new resolution change only if the resolution
        # state or the set of dependency problems changed
        if (
                pkg_result.prev_resolved != pkg_result.resolved or (
                    pkg_result.resolved is False and
                    pkg_result.prev_resolved is False and
                    # both are sets, they can be compared directly
                    pkg_result.problems != previous_problems.get(package.id)
                )
        ):
            resolution_change = ResolutionChange(
                package_id=package.id,
                resolved=pkg_result.resolved,
            )
            self.db.add(resolution_change)
            problem_entries.append((resolution_change, pkg_result.problems))

    # populate resolution changes' ids
    self.db.flush()

    # set problem resolution_ids and prepare dict form
    to_insert = [
        dict(resolution_id=resolution_change.id, problem=problem)
        for resolution_change, problems in problem_entries
        for problem in problems
    ]

    # insert dependency problems
    if to_insert:
        self.db.execute(insert(ResolutionProblem, to_insert))

    # delete old dependency changes, they'll be replaced with new ones
    self.db.query(UnappliedChange)\
        .filter(UnappliedChange.package_id.in_(package_ids))\
        .delete()

    # insert dependency changes
    if dependency_changes:
        self.db.execute(insert(UnappliedChange, dependency_changes))

    # commit without expiring ORM state, so packages can still be read
    # below without reloading; this also releases the row locks
    self.db.commit_no_expire()

    # emit fedmsg (if enabled); groups/collection are eagerly loaded
    # because the message presumably renders them — confirm with the
    # event handler
    if state_changes:
        for package in self.db.query(Package)\
                .filter(Package.id.in_(state_changes))\
                .options(joinedload(Package.groups),
                         joinedload(Package.collection)):
            prev_state, new_state = state_changes[package.id]
            dispatch_event(
                'package_state_change',
                self.session,
                package=package,
                prev_state=prev_state,
                new_state=new_state,
            )
def update_build_state(self, build, state):
    """
    Updates state of the build in db to new state (Koji state name).
    Cancels builds running too long.
    Deletes canceled builds.
    Sends fedmsg when the build is complete.
    Commits the transaction.

    :param: build the Build ORM object to update
    :param: state Koji task state name (key of Build.KOJI_STATE_MAP),
            or an intermediate state not present in the map
    """
    try:
        task_timeout = timedelta(0, get_config('koji_config.task_timeout'))
        time_threshold = datetime.now() - task_timeout
        # cancel builds that are not in a final Koji state but have been
        # running past the timeout, or whose cancelation was requested
        if (state not in Build.KOJI_STATE_MAP and
                (build.started and build.started < time_threshold or
                 build.cancel_requested)):
            self.log.info('Canceling build {0}'.format(build))
            try:
                self.koji_sessions['primary'].cancelTask(build.task_id)
            except koji.GenericError:
                # best-effort: the task may already be finished/canceled
                pass
            state = 'CANCELED'
        if state in Build.KOJI_STATE_MAP:
            # final state — map Koji state name to our numeric state
            state = Build.KOJI_STATE_MAP[state]
            build_id = build.id
            package_id = build.package_id
            self.db.expire_all()
            # lock build
            build = self.db.query(Build).filter_by(id=build_id)\
                .with_lockmode('update').first()
            if not build or build.state == state:
                # other process did the job already
                self.db.rollback()
                return
            if state == Build.CANCELED:
                self.log.info(
                    'Deleting build {0} because it was canceled'.format(
                        build))
                self.db.delete(build)
                self.db.commit()
                return
            assert state in (Build.COMPLETE, Build.FAILED)
            # a "Koji fault" means the failure was Koji's, not the
            # package's — drop the build as a false positive
            if koji_util.is_koji_fault(self.koji_sessions['primary'],
                                       build.task_id):
                self.log.info(
                    'Deleting build {0} because it ended with Koji fault'.
                    format(build))
                self.db.delete(build)
                self.db.commit()
                return
            self.log.info('Setting build {build} state to {state}'.format(
                build=build, state=Build.REV_STATE_MAP[state]))
            # fetch buildArch subtasks and repo_id from Koji
            tasks = self.sync_tasks([build], self.koji_sessions['primary'])
            if build.repo_id is None:
                # Koji problem, no need to bother packagers with this
                self.log.info(
                    'Deleting build {0} because it has no repo_id'.format(
                        build))
                self.db.delete(build)
                self.db.commit()
                return
            self.insert_koji_tasks(tasks)
            self.db.expire(build.package)
            # lock package so there are no concurrent state changes
            package = self.db.query(Package).filter_by(id=package_id)\
                .with_lockmode('update').one()
            # previous state must be read before the build state changes
            prev_state = package.msg_state_string
            build.state = state
            self.db.flush()
            # expire so fields updated by DB triggers are re-read
            self.db.expire(package)
            new_state = package.msg_state_string
            # unlock
            self.db.commit()
            if prev_state != new_state:
                # only notify on an actual package state transition
                dispatch_event('package_state_change', package=package,
                               prev_state=prev_state,
                               new_state=new_state)
        else:
            # build still running — at least sync tasks/repo_id so the
            # resolver can already work with them
            tasks = self.sync_tasks([build], self.koji_sessions['primary'])
            self.insert_koji_tasks(tasks)
            self.db.commit()
    except (StaleDataError, ObjectDeletedError, IntegrityError):
        # build was deleted concurrently
        self.db.rollback()
def test_same_state(self): package = self.prepare_package() with patch('fedmsg.publish') as publish: plugin.dispatch_event('package_state_change', package=package, prev_state='ok', new_state='ok') self.assertFalse(publish.called)
def update_build_state(session, build, task_state):
    """
    Updates state of the build in db to new state (Koji state name).
    Cancels builds running too long.
    Deletes canceled builds.
    Sends fedmsg when the build is complete.
    Commits the transaction.

    :param: session backend session providing db, log and koji accessors
    :param: build the Build ORM object to update
    :param: task_state Koji task state name; 'CANCELED' or a key of
            Build.KOJI_STATE_MAP means a final state
    """
    # pylint: disable=too-many-statements
    try:
        task_timeout = timedelta(0, get_config('koji_config.task_timeout'))
        time_threshold = datetime.now() - task_timeout
        canceled = task_state == 'CANCELED'
        # cancel builds that are not in a final Koji state but have been
        # running past the timeout, or whose cancelation was requested
        if (not canceled and task_state not in Build.KOJI_STATE_MAP and
                (build.started and build.started < time_threshold or
                 build.cancel_requested)):
            session.log.info('Canceling build {0}'.format(build))
            try:
                session.koji('primary').cancelTask(build.task_id)
            except koji.GenericError:
                # best-effort: the task may already be finished/canceled
                pass
            canceled = True
        if canceled or task_state in Build.KOJI_STATE_MAP:
            # None when canceled with a non-final task_state; the canceled
            # branch below returns before build_state is used otherwise
            build_state = Build.KOJI_STATE_MAP.get(task_state)
            build_id = build.id
            package_id = build.package_id
            session.db.expire_all()
            # lock build
            build = session.db.query(Build).filter_by(id=build_id)\
                .with_lockmode('update').first()
            if not build or build.state == build_state:
                # other process did the job already
                session.db.rollback()
                return
            if canceled:
                session.log.info(
                    'Deleting build {0} because it was canceled'.format(
                        build))
                session.db.delete(build)
                session.db.commit()
                return
            assert build_state in (Build.COMPLETE, Build.FAILED)
            # a "Koji fault" means the failure was Koji's, not the
            # package's — drop the build as a false positive
            if koji_util.is_koji_fault(session.koji('primary'),
                                       build.task_id):
                session.log.info(
                    'Deleting build {0} because it ended with Koji fault'.
                    format(build))
                session.db.delete(build)
                session.db.commit()
                return
            session.log.info('Setting build {build} state to {state}'.format(
                build=build, state=Build.REV_STATE_MAP[build_state]))
            # fetch buildArch subtasks and repo_id from Koji
            tasks = sync_tasks(session, build.package.collection, [build])
            if build.repo_id is None:
                # Koji problem, no need to bother packagers with this
                session.log.info(
                    'Deleting build {0} because it has no repo_id'.format(
                        build))
                session.db.delete(build)
                session.db.commit()
                return
            insert_koji_tasks(session, tasks)
            session.db.expire(build.package)
            # lock package so there are no concurrent state changes
            # (locking order needs to be build -> package)
            package = session.db.query(Package).filter_by(id=package_id)\
                .with_lockmode('update').one()
            # reset priorities
            clear_priority_data(session, [package])
            # acquire previous state
            # ! this needs to be done *before* updating the build state
            prev_state = package.msg_state_string
            build.state = build_state
            # bump build_priority on failure if applicable
            set_failed_build_priority(session, package, build)
            # refresh package so it has trigger-updated fields
            session.db.flush()
            session.db.expire(package)
            new_state = package.msg_state_string
            # unlock
            session.db.commit()
            if prev_state != new_state:
                # only notify on an actual package state transition
                dispatch_event(
                    'package_state_change',
                    session=session,
                    package=package,
                    prev_state=prev_state,
                    new_state=new_state,
                )
        else:
            # build still running — at least sync tasks/repo_id so the
            # resolver can already work with them
            tasks = sync_tasks(session, build.package.collection, [build])
            insert_koji_tasks(session, tasks)
            session.db.commit()
    except (StaleDataError, ObjectDeletedError, IntegrityError):
        # build was deleted concurrently
        session.db.rollback()