def decorator(handler, *args, **kwargs):
    """Wrap a message-handler call with centralized error handling.

    Calls ``func`` (the wrapped handler callable captured in the enclosing
    decorator scope) and, on failure: logs the traceback, rolls back the
    database session, marks the current event and its builds as FAILED,
    and re-raises the exception.

    :param handler: Handler instance; its ``_last_handled_exception`` and
        ``current_db_event_id`` attributes are used for bookkeeping.
    """
    try:
        return func(handler, *args, **kwargs)
    except Exception as e:
        # Skip the exception in case it has been already handled by
        # some decorator. This can happen in case multiple decorators
        # are nested. Use identity comparison ("is"): we care whether
        # this is the very same exception object, not one that merely
        # compares equal.
        if handler._last_handled_exception is e:
            raise
        handler._last_handled_exception = e

        err = 'Could not process message handler. See the traceback.'
        log.exception(err)

        # In case the exception interrupted the database transaction,
        # rollback it.
        db.session.rollback()

        # Mark the event as failed.
        db_event_id = handler.current_db_event_id
        db_event = db.session.query(Event).filter_by(
            id=db_event_id).first()
        if db_event:
            msg = "Handling of event failed with traceback: %s" % (str(e))
            db_event.transition(EventState.FAILED, msg)
            db_event.builds_transition(ArtifactBuildState.FAILED.value, msg)
            db.session.commit()
        raise
def fetch_cve_metadata(self, cve_list):
    """
    Fetches metadata about each CVE in `cve_list` and returns a tuple with
    the name of highest severity rate and the affected packages (a
    dictionary with product and pkg_name).

    See `SFM2API.THREAT_SEVERITIES` for list of possible severity rates.

    :param list cve_list: List of strings with CVE names.
    :rtype: tuple
    :return: Tuple, the first element is the name of highest severity rate
        occurring in CVEs from `cve_list` (or None when no severity could
        be determined). The second element is a list of dicts, with
        "product" and "pkg_name" of the affected packages.
    """
    max_rating = -1
    affected_pkgs = []
    for cve in cve_list:
        # Reset per CVE: otherwise a response without an 'impact' key
        # would silently reuse the severity of the previous CVE.
        severity = None
        try:
            elements = self.query_sfm2(cve)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 400:
                log.warning(
                    "The request for the CVE %s to the SFM2 API seems wrong, "
                    "impact and affected packages unknown. %s",
                    cve, e.response.request.url)
                continue
            if e.response.status_code == 500:
                log.warning(
                    "Some error occurred looking for CVE %s with SFM2 API, "
                    "impact and affected packages unknown. %s",
                    cve, e.response.request.url)
                continue
            # Unexpected HTTP error - let the caller deal with it.
            raise

        try:
            severity = elements['impact']
        except (IndexError, KeyError):
            log.warning("Some error occurred looking for impact for CVE %s "
                        "using SFM2 API", cve)

        try:
            # Keep only packages that are actually affected and not
            # resolved as "wontfix"/"ooss".
            affected_pkgs.extend([
                {'product': item['ps_module'],
                 'pkg_name': item['ps_component']}
                for item in elements['affects'] if (
                    item['affected'] != "notaffected" and
                    item['resolution'] not in ["wontfix", "ooss"])])
        except (KeyError, IndexError):
            log.exception("Some error occurred looking for affected packages "
                          "for CVE %s using SFM2 API", cve)

        try:
            rating = SFM2API.THREAT_SEVERITIES.index(severity)
        except ValueError:
            log.error("Unknown threat_severity '%s' for CVE %s",
                      severity, cve)
            continue
        max_rating = max(max_rating, rating)

    if max_rating == -1:
        return (None, affected_pkgs)
    return (SFM2API.THREAT_SEVERITIES[max_rating], affected_pkgs)
def poll(self):
    """Execute a single polling cycle.

    Checks unfinished Koji tasks; a database disconnect rolls the session
    back, any other failure is only logged so the poller keeps running.
    """
    try:
        self.check_unfinished_koji_tasks(db.session)
    except _sa_disconnect_exceptions as disconnect_error:
        # The session is unusable after a disconnect - roll it back
        # before the next cycle.
        db.session.rollback()
        log.error("Invalid request, session is rolled back: %s",
                  disconnect_error.orig)
    except Exception:
        log.exception('Error in poller execution:')
    log.info('Poller will now sleep for "{}" seconds'
             .format(conf.polling_interval))
def _validate_kerberos_config():
    """
    Validates the kerberos configuration and raises ValueError in case of
    error.

    :raises ValueError: when kerberos authentication is enabled but the
        required LDAP settings are missing.
    """
    errors = []
    if not conf.auth_ldap_server:
        errors.append("kerberos authentication enabled with no LDAP server "
                      "configured, check AUTH_LDAP_SERVER in your config.")
    if not conf.auth_ldap_user_base:
        errors.append("kerberos authentication enabled with no LDAP user "
                      "base configured, check AUTH_LDAP_USER_BASE in your "
                      "config.")
    if errors:
        for error in errors:
            # log.error, not log.exception: there is no active exception
            # here, so log.exception would append a spurious
            # "NoneType: None" traceback to every message.
            log.error(error)
        raise ValueError("Invalid configuration for kerberos authentication.")
def get_docker_repo_tags(self, errata_id):
    """
    Get ET repo/tag configuration using XML-RPC call
    get_advisory_cdn_docker_file_list

    :param int errata_id: Errata advisory ID.
    :rtype: dict or None
    :return: Dict of advisory builds with repo and tag config, or None
        when the XML-RPC call failed or returned no data:
        {
            'build_NVR': {
                'cdn_repo1': [
                    'tag1',
                    'tag2'
                ],
                ...
            },
            ...
        }
    """
    try:
        response = self.xmlrpc.get_advisory_cdn_docker_file_list(
            errata_id)
    except Exception:
        # Message typo fixed ("Canot" -> "Cannot").
        log.exception("Cannot call XMLRPC "
                      "get_advisory_cdn_docker_file_list call.")
        return None
    if response is None:
        log.warning("The get_advisory_cdn_docker_file_list XMLRPC call "
                    "returned None.")
        return None

    # Flatten the nested response into {nvr: {repo: [tags]}}. Response
    # keys are unique, so no duplicate-NVR check is needed.
    repo_tags = dict()
    for build_nvr, build_data in response.items():
        repos = build_data['docker']['target']['repos']
        repo_tags[build_nvr] = {
            repo: repo_data['tags'] for repo, repo_data in repos.items()}
    return repo_tags
def consume(self, message):
    """Consume one message from the queue and dispatch it to handlers.

    Accepts either a raw bus message (a dict with a 'body') or an
    already-abstracted events.BaseEvent injected by other parts of this
    codebase; unrecognized messages are counted and dropped silently.
    """
    messaging_rx_counter.inc()

    # Normalize to our message abstraction: already-abstracted events
    # pass through untouched, raw messages are converted first.
    if isinstance(message, events.BaseEvent):
        msg = message
    else:
        msg = self.get_abstracted_msg(message['body'])

    if not msg:
        # Deliberately no logging here - it would flood the logs with
        # useless messages.
        messaging_rx_ignored_counter.inc()
        return

    # Primary work happens here.
    try:
        # The backend has no Flask app-context, yet models.Event.json()
        # and models.ArtifactBuild.json() call flask.url_for, which needs
        # one. Creating a fresh context per json() call mutates db.session
        # without restoring it (possibly a Flask bug), so a single global
        # app-context around the whole processing is the only safe option.
        with app.app_context():
            self.process_event(msg)
            messaging_rx_processed_ok_counter.inc()
    except Exception:
        messaging_rx_failed_counter.inc()
        log.exception('Failed while handling {0!r}'.format(msg))

    if self.stop_condition and self.stop_condition(message):
        self.shutdown()
def process_event(self, msg):
    """Offer *msg* to every configured handler that can handle it.

    Handlers run in ascending "order" (default 50). A handler may return
    a list of fake events, which are re-queued for processing.
    """
    log.debug(
        'Received a message with an ID of "{0}" and of type "{1}"'.format(
            getattr(msg, 'msg_id', None), type(msg).__name__))

    handler_classes = sorted(
        load_classes(conf.handlers),
        key=lambda cls: getattr(cls, "order", 50))

    for handler_class in handler_classes:
        handler = handler_class()
        if not handler.can_handle(msg):
            continue
        idx = "%s: %s, %s" % (type(handler).__name__,
                              type(msg).__name__, msg.msg_id)
        log.debug("Calling %s" % idx)
        try:
            further_work = handler.handle(msg) or []
        except Exception:
            log.exception(
                'Could not process message handler. See the traceback.')
        else:
            # Handlers can *optionally* return fake messages that are put
            # back into the main work queue. Useful e.g. when a submitted
            # component build turns out to be already built: the handler
            # fakes its completion so work resumes as if koji announced it.
            for event in further_work:
                log.info(" Scheduling faked event %r" % event)
                self.incoming.put(event)
        log.debug("Done with %s" % idx)
def internal_server_error(e):
    """Flask error handler for RuntimeError exceptions.

    Logs the full traceback and returns a JSON 500 response.
    """
    log.exception('Internal server error: %s', e)
    status, title = 500, 'Internal Server Error'
    return json_error(status, title, str(e))
def validationerror_error(e):
    """Flask error handler for ValueError exceptions.

    Logs the full traceback and returns a JSON 400 response.
    """
    log.exception('Bad Request: %s', e)
    status, title = 400, 'Bad Request'
    return json_error(status, title, str(e))