def test_manual_record(self):
    """record() runs the processor and merges its mutations into the stored crumb."""
    expected = {
        "message": "whatever",
        "level": "warning",
        "category": "category",
        "type": "the_type",
    }

    client = Client("http://*****:*****@example.com/0")
    with client.context:
        def processor(data):
            for key, value in expected.items():
                assert data[key] == value
            assert data["data"] == {"foo": "bar"}
            data["data"]["extra"] = "something"

        breadcrumbs.record(data={"foo": "bar"}, processor=processor, **expected)

        buffered = client.context.breadcrumbs.get_buffer()
        assert len(buffered) == 1
        crumb = buffered[0]
        for key, value in expected.items():
            assert crumb[key] == value
        assert crumb["data"] == {"foo": "bar", "extra": "something"}
def _get_scope_db_field(self, scope):
    """Map an ESI scope name to its database field name.

    Records the requested scope as a breadcrumb for debugging.
    Raises AttributeError when the scope is not in FIELD_MAPPING.
    """
    breadcrumbs.record(message="Scope: {}".format(scope))
    # Single dict lookup (EAFP) instead of a `.keys()` membership test
    # followed by a second `.get()` on the same mapping.
    try:
        return CharacterESIRoles.FIELD_MAPPING[scope]
    except KeyError:
        raise AttributeError(
            "Scope {} requested but not in mapping".format(scope))
def __send_loop__(self):
    # Main server-websocket loop: open a socket, send a heartbeat at most
    # once a minute and push printer data every 10s while connected;
    # on any failure disconnect and back off exponentially before retrying.
    last_heartbeat = 0
    backoff = ExpoBackoff(1200)
    while not self.should_quit():
        try:
            self.ss = ServerSocket(self.config['ws_host'] + "/app/ws/device", self.config['token'], on_message=self.__on_server_ws_msg__)
            # Run the socket's read loop on a daemon thread so it dies with the process.
            wst = threading.Thread(target=self.ss.run)
            wst.daemon = True
            wst.start()
            time.sleep(2)  # Allow the time for server ws to connect
            while self.ss.connected():
                breadcrumbs.record(message="Message loop for: " + self.config['token'])
                if time.time() - last_heartbeat > 60:
                    self.__send_heartbeat__()
                    last_heartbeat = time.time()
                self.send_octoprint_data()
                # Each successful cycle resets the backoff window.
                backoff.reset()
                time.sleep(10)
        finally:
            # Best-effort disconnect; the socket may already be gone.
            try:
                self.ss.disconnect()
            except:
                pass
            backoff.more()  # When it gets here something is wrong. probably network issues. Pause before retry
def bulk_update_character_affiliations(update_interval_hours=48):
    """Queue a public-data refresh for up to 1000 stale player characters.

    Skips the run entirely during server downtime. Only non-NPC characters
    (by CCP id range) whose public data is missing or older than
    ``update_interval_hours`` are considered.
    """
    if is_downtime():
        return
    update_delay = timedelta(hours=update_interval_hours)
    day_ago = now() - update_delay
    # be sure to exclude npcs
    out_of_date_characters = EVEPlayerCharacter.objects.filter(
        (Q(publicdata_last_updated__isnull=True) | Q(publicdata_last_updated__lt=day_ago))
        & ((Q(id__gt=90000000) & Q(id__lte=98000000)) | (Q(id__gt=100000000) & Q(id__lte=2147483648)))
        & Q(
            corporation__isnull=False
        )  # characters without a corporation are either super old legacy characters or complete bugged out. legacy chars will get fixed the next time get_object is called on them.
    ).order_by("publicdata_last_updated")
    # only grab first thousand characters ([:1000] — the previous [0:999]
    # slice was off by one and only took 999)
    out_of_date_characters = out_of_date_characters[:1000]
    characters = [c.pk for c in out_of_date_characters]
    # raven's breadcrumbs.record() accepts keyword options only; the old
    # positional call raised TypeError at runtime.
    breadcrumbs.record(message="len(characters) = {}".format(len(characters)))
    if not characters:
        logger.info("no characters need updating. ending task")
        return
    update_character_affiliations(characters)
def test_crumb_buffer(self):
    """Breadcrumbs land in the buffer only when the client enables them."""
    for enable in (1, 0):
        client = Client("http://*****:*****@example.com/0",
                        enable_breadcrumbs=enable)
        with client.context:
            breadcrumbs.record(category="huhu", message="aha",
                               type="foo", data={"bar": "baz"})
            buffered = client.context.breadcrumbs.get_buffer()
            assert len(buffered) == enable
def start_hls_pipeline(self, remote_status, plugin, dev_settings):
    # Pipe camera H.264 output through ffmpeg to produce rolling HLS
    # (mpegts) segments; recording only runs while someone is watching.
    breadcrumbs.record(message="Token to upload mpegts: " + self.token)
    if not self.__init_camera__(plugin, dev_settings):
        return
    self.webcam_server = WebcamServer(self.camera)
    self.webcam_server.start()
    # Stream timestamps should be reset when ffmepg restarts
    requests.delete(self.stream_host+'/video/mpegts', headers={"Authorization": "Bearer " + self.token})
    # copy codec + segment options: 2s segments, keep 10, delete old ones.
    ffmpeg_cmd = '{} -re -i pipe:0 -y -an -vcodec copy -f hls -hls_time 2 -hls_list_size 10 -hls_delete_threshold 10 -hls_flags split_by_time+delete_segments+second_level_segment_index -strftime 1 -hls_segment_filename {}/%s-%%d.ts -hls_segment_type mpegts -'.format(FFMPEG, TS_TEMP_DIR)
    _logger.warn('Launching: ' + ffmpeg_cmd)
    FNULL = open(os.devnull, 'w')
    sub_proc = subprocess.Popen(ffmpeg_cmd.split(' '), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=FNULL)
    # Poll the generated m3u8 playlist on a daemon thread.
    m3u8_thread = Thread(target=self.poll_m3u8, args=(sub_proc,))
    m3u8_thread.setDaemon(True)
    m3u8_thread.start()
    # NOTE(review): loop never exits on its own — presumably torn down with
    # the process; confirm against the caller.
    while True:
        if remote_status['watching']:
            self.camera.start_recording(sub_proc.stdin, format='h264', quality=(30 if 'high' in dev_settings.get('camResolution', 'medium') else 23))
            while remote_status['watching']:
                self.camera.wait_recording(2)
            self.camera.wait_recording(4)  # record 4 more seconds to minimize the pause user views the stream again
            self.camera.stop_recording()
        else:
            time.sleep(0.05)
def test_manual_record(self):
    '''Processor mutations are merged into the recorded breadcrumb.'''
    client = Client('http://*****:*****@example.com/0')
    with client.context:
        def processor(data):
            assert data['message'] == 'whatever'
            assert data['level'] == 'warning'
            assert data['category'] == 'category'
            assert data['type'] == 'the_type'
            assert data['data'] == {'foo': 'bar'}
            data['data']['extra'] = 'something'

        breadcrumbs.record(
            category='category',
            level='warning',
            message='whatever',
            type='the_type',
            data={'foo': 'bar'},
            processor=processor,
        )

        buffered = client.context.breadcrumbs.get_buffer()
        assert len(buffered) == 1
        recorded = buffered[0]
        assert recorded['message'] == 'whatever'
        assert recorded['level'] == 'warning'
        assert recorded['category'] == 'category'
        assert recorded['type'] == 'the_type'
        assert recorded['data'] == {'foo': 'bar', 'extra': 'something'}
def handle_tex_compiler_error(latex_file_path, ext):
    """Extract the relevant error region from the LaTeX log and raise.

    Reads the .log file next to the .tex source, keeps each error line plus
    the following ``print_context`` lines, records the result as a breadcrumb
    and raises FailureDuringPublication with the collected text.
    """
    # Replace the final suffix properly instead of assuming a 3-character
    # extension (the old `[:-3] + "log"` broke for anything but ".tex").
    log_file_path = str(Path(latex_file_path).with_suffix(".log"))
    errors = ["Error occured, log file {} not found.".format(log_file_path)]
    with contextlib.suppress(FileNotFoundError, UnicodeDecodeError):
        with Path(log_file_path).open(encoding="utf-8") as latex_log:
            # TODO zmd: see if the lines we extract here contain enough info for debugging purpose
            print_context = 25
            lines = []
            # Start far enough back that nothing is kept before the first hit.
            relevant_line = -print_context
            for idx, line in enumerate(latex_log):
                if "fatal" in line.lower() or "error" in line.lower():
                    relevant_line = idx
                    lines.append(line)
                elif idx - relevant_line < print_context:
                    lines.append(line)
            errors = "\n".join(lines)
    logger.debug("%s ext=%s", errors, ext)
    with contextlib.suppress(ImportError):
        from raven import breadcrumbs

        breadcrumbs.record(message="luatex call", data=errors, type="cmd")
    raise FailureDuringPublication(errors)
def load_config(self):
    """Load the YAML config file, resetting to defaults if missing or broken.

    Ensures a ``stream_host`` default is present and persisted. Any IOError
    (including an empty file) is reported to Sentry and triggers a reset.
    """
    import os.path
    if not os.path.exists(self.config_path):
        self.reset_config()
        return
    try:
        with open(self.config_path, 'r') as stream:
            config_str = stream.read()
        breadcrumbs.record(message="Config file content: " + config_str)
        with self._mutex:
            # safe_load: the config file is external input; bare yaml.load
            # allows arbitrary Python object construction.
            self.__items__ = yaml.safe_load(config_str)
        if self.__items__ is None:
            raise IOError("Empty config file")
        if "stream_host" not in self.__items__:
            with self._mutex:
                self.__items__[
                    "stream_host"] = "http://stream.getanywhere.io"
            self.save_config()
    except IOError:
        self.sentry.captureException()
        self.reset_config()
def load_config(self):
    """Load the YAML config file, resetting to defaults on IOError.

    Applies two migrations after load: a default ``stream_host`` and the
    legacy heroku ``ws_host`` rewrite, persisting after each.
    """
    import os.path
    try:
        with open(self.config_path, 'r') as stream:
            config_str = stream.read()
        breadcrumbs.record(message="config path: " + self.config_path)
        breadcrumbs.record(message="Config file content: " + config_str)
        # safe_load: the config file is external input; bare yaml.load
        # allows arbitrary Python object construction.
        self.__items__ = yaml.safe_load(config_str)
        if self.__items__ is None:
            raise IOError("Empty config file")
        if "stream_host" not in self.__items__:
            self.__items__["stream_host"] = "http://stream.getanywhere.io"
            self.save_config()
        if self.__items__["ws_host"] == "ws://getanywhere.herokuapp.com":
            self.__items__["ws_host"] = "wss://www.getanywhere.io"
            self.save_config()
    except IOError:
        self.reset_config()
def wrapper(contract, kwargs, history):
    """Delegate to the original context factory, recording a story breadcrumb."""
    ctx, ns, lines, bind = origin_make_context(contract, kwargs, history)

    def crumb(data):
        # FIXME: Use pretty print.
        data.update({"category": "story", "message": repr(ctx)})

    record(processor=crumb)
    return ctx, ns, lines, bind
def record_with_helper(self, data):
    """Record a warning-level worker breadcrumb with the payload, then raise."""
    breadcrumbs.record(level='warning',
                       category="worker",
                       data=data,
                       message='breadcrumb message')
    raise CustomException("Error!")
def test_crumb_buffer(self):
    '''The buffer stays empty when breadcrumbs are disabled on the client.'''
    for enable in (1, 0):
        client = Client('http://*****:*****@example.com/0',
                        enable_breadcrumbs=enable)
        with client.context:
            breadcrumbs.record(category='huhu', message='aha',
                               type='foo', data={'bar': 'baz'})
            buffered = client.context.breadcrumbs.get_buffer()
            assert len(buffered) == enable
def record_sql(vendor, alias, start, duration, sql, params):
    """Record the executed SQL statement as a 'query' breadcrumb (lazily rendered)."""
    def build_crumb(data):
        rendered, rendered_params = format_sql(sql, params)
        if rendered_params:
            rendered = rendered % tuple(rendered_params)
        data['message'] = rendered
        data['category'] = 'query'

    breadcrumbs.record(processor=build_crumb)
def record_sql(vendor, alias, start, duration, sql, params):
    """Defer SQL rendering to breadcrumb-emission time via a processor."""
    def fill(data):
        statement, values = format_sql(sql, params)
        if values:
            statement = statement % tuple(values)
        # maybe category to 'django.%s.%s' % (vendor, alias or
        # 'default') ?
        data['message'] = statement
        data['category'] = 'query'

    breadcrumbs.record(processor=fill)
def stream_up(self, stream_host, token, printer, remote_status, settings, sentryClient):
    # POST a continuous MJPEG frame stream to the server, throttling the
    # frame rate based on printer state and whether anyone is watching.
    # NOTE(review): Python 2 code (`except Exception, e`, iterator `next`).
    class UpStream:
        def __init__(self, printer, settings):
            self.settings = settings
            self.last_reconnect_ts = datetime.now()
            self.printer = printer
            self.remote_status = remote_status
            self.last_frame_ts = datetime.min

        def __iter__(self):
            return self

        def seconds_remaining_until_next_cycle(self):
            # Frame pacing: 3 fps while printing and watched; slower otherwise.
            cycle_in_seconds = 1.0/3.0  # Limit the bandwidth consumption to 3 frames/second
            if not self.printer.get_state_id() in ['PRINTING', 'PAUSED']:  # Printer idle
                if self.remote_status['watching']:
                    cycle_in_seconds = 2
                else:
                    cycle_in_seconds = 20
            else:
                if not self.remote_status['watching']:
                    cycle_in_seconds = 10
            return cycle_in_seconds-(datetime.now() - self.last_frame_ts).total_seconds()

        def next(self):
            if (datetime.now() - self.last_reconnect_ts).total_seconds() < 60:  # Allow connection to last up to 60s
                try:
                    # Busy-wait (0.1s steps) until the next frame is due.
                    while self.seconds_remaining_until_next_cycle() > 0:
                        time.sleep(0.1)
                    self.last_frame_ts = datetime.now()
                    return capture_mjpeg(self.settings)
                except:
                    sentryClient.captureException()
                    raise StopIteration()
            else:
                raise StopIteration()  # End connection so that `requests.post` can process server response

    # Outer retry loop: each UpStream lives at most 60s, then reconnect.
    backoff = ExpoBackoff(1200)
    while not self.should_quit():
        try:
            breadcrumbs.record(message="New UpStream: " + token)
            stream = UpStream(printer, settings)
            requests.post(stream_host + "/video", data=stream, headers={"Authorization": "Bearer " + token}).raise_for_status()
            backoff.reset()
        except Exception, e:
            _logger.error(e)
            backoff.more()
def write_log(level, message_out):
    """Append a timestamped log line to the log file.

    Mirrors the line to stderr when ``to_stderr`` is set and records it as a
    Sentry breadcrumb with a custom level.
    """
    current_time = str(time.strftime('%c'))
    time_since_start = format(time.perf_counter() - start_time, '.4f')  # the format() adds trailing zeroes
    full_line = "[{} +{}] {}: {}\n".format(current_time, time_since_start, level, message_out)
    # Context manager guarantees the handle is closed even if write() raises
    # (the previous open/close pair leaked the handle on error).
    with open(filename, 'a') as log_file:
        log_file.write(full_line)
    if to_stderr:
        print(full_line, file=sys.stderr)
    breadcrumbs.record(message=full_line, level=level)  # sentry level = custom level
def tex_compiler(self, texfile):
    """Run lualatex on ``texfile``; return True iff the output PDF exists."""
    command = 'lualatex -shell-escape -interaction=nonstopmode {}'.format(texfile)
    proc = subprocess.Popen(command,
                            shell=True,
                            cwd=path.dirname(texfile),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proc.communicate()
    with contextlib.suppress(ImportError):
        from raven import breadcrumbs
        breadcrumbs.record(message='lualatex call', data=command, type='cmd')
    expected_pdf = path.splitext(texfile)[0] + self.extension
    return path.exists(expected_pdf)
def make_glossary(self, basename, texfile):
    """Run makeglossaries; True on success, else delegate to the error handler."""
    command = "makeglossaries {}".format(basename)
    proc = subprocess.Popen(
        command, shell=True, cwd=path.dirname(texfile),
        stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    out, err = proc.communicate()
    with contextlib.suppress(ImportError):
        from raven import breadcrumbs
        breadcrumbs.record(message="makeglossaries call", data=command, type="cmd")
    # TODO: check makeglossary exit codes to see if we can enhance error detection
    if all("fatal" not in text.decode("utf-8").lower() for text in (out, err)):
        return True
    self.handle_makeglossaries_error(texfile)
def tex_compiler(self, texfile, draftmode: str = ""):
    """Run lualatex (optionally in draft mode); True iff the PDF was produced."""
    command = "lualatex -shell-escape -interaction=nonstopmode {} {}".format(draftmode, texfile)
    proc = subprocess.Popen(
        command, shell=True, cwd=path.dirname(texfile),
        stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    # let's put 10 min of timeout because we do not generate latex everyday
    proc.communicate(timeout=600)
    with contextlib.suppress(ImportError):
        from raven import breadcrumbs
        breadcrumbs.record(message="lualatex call", data=command, type="cmd")
    expected_pdf = path.splitext(texfile)[0] + self.extension
    return path.exists(expected_pdf)
def has_scopes(self, names):
    """Check whether every requested scope (a str or a list of str) is granted."""
    if isinstance(names, list):
        # All scopes in the list must be granted; short-circuits on failure.
        return all(self._check_scope(scope) for scope in names)
    elif isinstance(names, str):
        if self._check_scope(names):
            breadcrumbs.record(
                message="has_scopes: {}: {}".format(names, True))
            return True
        breadcrumbs.record(
            message="has_scopes: {}: {}".format(names, False))
        return False
def get_context(bot, update, session, user):
    """Create a context object for callback queries."""
    query = update.callback_query
    context = CallbackContext(session, bot, query, user)
    crumb = {
        "query": query,
        "data": query.data,
        "user": user,
        "callback_type": context.callback_type,
        "callback_result": context.callback_result,
        "poll": context.poll,
    }
    breadcrumbs.record(data=crumb, category="callbacks")
    return context
def make_glossary(self, basename, texfile):
    '''Invoke makeglossaries and scan its output for fatal failures.'''
    command = 'makeglossaries {}'.format(basename)
    proc = subprocess.Popen(command,
                            shell=True,
                            cwd=path.dirname(texfile),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    with contextlib.suppress(ImportError):
        from raven import breadcrumbs
        breadcrumbs.record(message='makeglossaries call', data=command, type='cmd')
    # TODO: check makeglossary exit codes to see if we can enhance error detection
    if all('fatal' not in text.decode('utf-8').lower() for text in (out, err)):
        return True
    self.handle_makeglossaries_error(texfile)
def call_tg_func(tg_object: object, function_name: str,
                 args: list = None, kwargs: dict = None):
    """Call a tg object member function.

    We need to handle those calls in case we get rate limited: retries up to
    3 times on TimedOut/NetworkError with a linearly growing backoff, then
    re-raises the last exception.
    """
    # Normalize once up front (was redundantly re-evaluated every iteration).
    args = args if args else []
    kwargs = kwargs if kwargs else {}
    logger = logging.getLogger()
    current_try = 1
    tries = 4
    exception = None
    while current_try < tries:
        try:
            breadcrumbs.record(data={"action": f"Starting: {datetime.now()}"}, category="info")
            return getattr(tg_object, function_name)(*args, **kwargs)
        except (TimedOut, NetworkError) as e:
            # Can't update message. just ignore it
            if "Message to edit not found" in str(
                    e) or "Message is not modified" in str(e):
                raise e

            timeout = 2 * current_try
            breadcrumbs.record(data={"action": f"Exception: {datetime.now()}"}, category="info")
            logger.info(
                f"Try {current_try}: Got telegram exception waiting {timeout} secs."
            )
            logger.info(e.message)

            if config["logging"]["debug"]:
                sentry.captureException()
            time.sleep(timeout)
            current_try += 1
            exception = e
    # All retries exhausted — surface the last telegram error.
    raise exception
def tex_compiler(self, texfile):
    '''Compile ``texfile`` with lualatex; report success via PDF existence.'''
    command = 'lualatex -shell-escape -interaction=nonstopmode {}'.format(texfile)
    proc = subprocess.Popen(command,
                            shell=True,
                            cwd=path.dirname(texfile),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proc.communicate()
    with contextlib.suppress(ImportError):
        from raven import breadcrumbs
        breadcrumbs.record(message='lualatex call', data=command, type='cmd')
    produced = path.splitext(texfile)[0] + self.extension
    return path.exists(produced)
def call_tg_func(tg_object: object, function_name: str,
                 args: list = None, kwargs: dict = None):
    """Call a tg object member function.

    We need to handle those calls in case we get rate limited: retries once
    after 4 seconds on TimedOut/NetworkError, then re-raises the exception.
    """
    # Normalize once up front (was redundantly re-evaluated every iteration).
    args = args if args else []
    kwargs = kwargs if kwargs else {}
    logger = logging.getLogger()
    _try = 0
    tries = 2
    exception = None
    while _try < tries:
        try:
            breadcrumbs.record(data={'action': f'Starting: {datetime.now()}'}, category='info')
            return getattr(tg_object, function_name)(*args, **kwargs)
        except (TimedOut, NetworkError) as e:
            # Can't update message. just ignore it
            if 'Message to edit not found' in str(e) or \
                    'Message is not modified' in str(e):
                raise e

            breadcrumbs.record(data={'action': f'Exception: {datetime.now()}'}, category='info')
            logger.info(f'Got telegram exception waiting 4 secs.')
            logger.info(e)

            if config.DEBUG:
                sentry.captureException()
            time.sleep(4)
            _try += 1
            exception = e
    # All retries exhausted — surface the last telegram error.
    raise exception
def wrapper(self, *args, **kwargs):
    """Record the call arguments as a breadcrumb, reset the service context,
    delegate to the wrapped function, and log (then re-raise) any exception.
    """
    service_name = self.__class__.__name__
    # A dict literal is sufficient; wrapping it in dict() was redundant.
    breadcrumbs.record(message='Call args',
                       category=service_name,
                       level='info',
                       data={'args': args, **kwargs})
    try:
        self._reset_context()
        return func(self, *args, **kwargs)
    except Exception as e:
        self.logger.error(
            f'Service {service_name} thrown exception, context: {self._context}',
            exc_info=e)
        raise
def activate_deactivate(self, a, b, c):
    """Record worker breadcrumbs around a context deactivate/activate cycle, then raise."""
    record = breadcrumbs.record
    record(message=a, category="worker")
    self.sentry.context.deactivate()
    record(message=b, category="worker")
    self.sentry.context.activate()
    record(message=c, category="worker")
    raise CustomException("Error!")
def record_log_breadcrumb(record):
    """Convert a logging.LogRecord into a raven breadcrumb, honoring any
    registered special handlers before falling back to the generic format."""
    # lazy import avoids any raven loggers being initialised early
    from raven import breadcrumbs

    # Argument tuple shared by raven's special-handler callbacks.
    breadcrumb_handler_args = (
        logging.getLogger(record.name),
        record.levelno,
        record.message,
        record.args,
        {
            'extra': record._structured,
            'exc_info': record.exc_info,
            'stack_info': getattr(record, 'stack_info', None)
        },
    )
    # Global handlers first: any one of them may claim the record outright.
    for handler in getattr(breadcrumbs, 'special_logging_handlers', []):
        if handler(*breadcrumb_handler_args):
            return
    # Then a per-logger handler keyed by logger name.
    handler = breadcrumbs.special_logger_handlers.get(record.name)
    if handler is not None and handler(*breadcrumb_handler_args):
        return

    def processor(data):
        # Attach source location and structured extras to the crumb.
        metadata = {
            'path': record.pathname,
            'lineno': record.lineno,
        }
        if hasattr(record, 'func'):
            metadata['func'] = record.func
        metadata.update(record._structured)
        data.update({
            'message': record.message,
            'category': record.name,
            'level': record.levelname.lower(),
            'data': metadata,
        })

    breadcrumbs.record(processor=processor)
def handle_tex_compiler_error(latex_file_path, ext):
    """Extract the relevant error region from the LaTeX log and raise.

    Keeps each 'fatal'/'error' line from the .log plus the following
    ``print_context`` lines, records them as a breadcrumb, then raises
    FailureDuringPublication with the collected text.
    """
    # Replace the final suffix properly instead of assuming a 3-character
    # extension (the old `[:-3] + 'log'` broke for anything but '.tex').
    log_file_path = str(Path(latex_file_path).with_suffix('.log'))
    errors = ['Error occured, log file {} not found.'.format(log_file_path)]
    with contextlib.suppress(FileNotFoundError, UnicodeDecodeError):
        with Path(log_file_path).open(encoding='utf-8') as latex_log:
            # TODO zmd: see if the lines we extract here contain enough info for debugging purpose
            print_context = 25
            lines = []
            # Start far enough back that nothing is kept before the first hit.
            relevant_line = -print_context
            for idx, line in enumerate(latex_log):
                if 'fatal' in line.lower() or 'error' in line.lower():
                    relevant_line = idx
                    lines.append(line)
                elif idx - relevant_line < print_context:
                    lines.append(line)
            errors = '\n'.join(lines)
    logger.debug('%s ext=%s', errors, ext)
    with contextlib.suppress(ImportError):
        from raven import breadcrumbs
        breadcrumbs.record(message='luatex call', data=errors, type='cmd')
    raise FailureDuringPublication(errors)
def _check_scope(self, scope):
    """Return the value of the DB field backing ``scope`` (None if unset).

    Raises AttributeError when the scope is not in FIELD_MAPPING.
    """
    # Membership test on the mapping itself; `.keys()` was redundant.
    if scope not in CharacterESIRoles.FIELD_MAPPING:
        raise AttributeError(
            "Scope {} requested but not in mapping".format(scope))
    db_field_name = self._get_scope_db_field(scope)
    breadcrumbs.record(message="db field: {}".format(db_field_name))
    result = getattr(self, db_field_name, None)
    # Record the result exactly once (it was previously recorded twice
    # whenever the value was truthy).
    breadcrumbs.record(message="Result: {}".format(result))
    return result
def hello():
    """Divide the 'dividend' query arg by 'divisor' and return the result as text.

    Returns '0' for a zero divisor and 'BOOM' (after reporting to Sentry)
    when the division fails for any other reason.
    """
    breadcrumbs.record(message='Hey, me looking for number')
    dividend = request.args.get('dividend')
    divisor = request.args.get('divisor')
    breadcrumbs.record(message='OK, me got the nummers')
    breadcrumbs.record(message='Wow, such message, much importance', level='info')
    dividend = int(dividend)
    divisor = int(divisor)
    if divisor == 0:
        return '0'
    try:
        # Values are already ints; the old extra int() calls were redundant.
        return str(dividend / divisor)
    except Exception:
        # Narrowed from a bare except (which also swallowed KeyboardInterrupt).
        sentry.captureException()
        return 'BOOM'
def new_func(self):
    """Drop a dummy breadcrumb before delegating to the wrapped function."""
    breadcrumbs.record(message="Dummy message", type='dummy', category='dummy')
    old_func(self)
def watch_loop():
    """Stream Kubernetes events, sending warnings/errors to Sentry as messages
    and every event as a breadcrumb.

    NOTE(review): indentation reconstructed from a whitespace-mangled source;
    nesting of the name-mangling block under the involved_object checks is
    inferred — confirm against the original repository.
    """
    v1 = client.CoreV1Api()
    w = watch.Watch()
    sentry = SentryClient(
        dsn=DSN,
        install_sys_hook=False,
        install_logging_hook=False,
        include_versions=False,
        capture_locals=False,
        context={},
        environment=ENV,
        release=RELEASE,
        transport=ThreadedRequestsHTTPTransport,
    )
    # try:
    #     resource_version = v1.list_event_for_all_namespaces().items[-1].metadata.resource_version
    # except:
    #     resource_version = 0
    if EVENT_NAMESPACE:
        stream = w.stream(v1.list_namespaced_event, EVENT_NAMESPACE)
    else:
        stream = w.stream(v1.list_event_for_all_namespaces)
    for event in stream:
        logging.debug("event: %s" % event)
        event_type = event['type'].lower()
        event = event['object']
        # Keep only populated metadata fields.
        meta = {
            k: v
            for k, v in event.metadata.to_dict().items() if v is not None
        }
        creation_timestamp = meta.pop('creation_timestamp', None)
        level = (event.type and event.type.lower())
        level = LEVEL_MAPPING.get(level, level)
        component = source_host = reason = namespace = name = short_name = kind = None
        if event.source:
            source = event.source.to_dict()
            if 'component' in source:
                component = source['component']
            if 'host' in source:
                source_host = source['host']
        if event.reason:
            reason = event.reason
        if event.involved_object and event.involved_object.namespace:
            namespace = event.involved_object.namespace
        elif 'namespace' in meta:
            namespace = meta['namespace']
        if event.involved_object and event.involved_object.kind:
            kind = event.involved_object.kind
        if event.involved_object and event.involved_object.name:
            name = event.involved_object.name
            # Strip generated suffixes (e.g. replicaset/pod hashes) so that
            # events for the same workload group together.
            if not MANGLE_NAMES or kind in MANGLE_NAMES:
                bits = name.split('-')
                if len(bits) in (1, 2):
                    short_name = bits[0]
                else:
                    short_name = "-".join(bits[:-2])
            else:
                short_name = name
        message = event.message
        if namespace and short_name:
            obj_name = "(%s/%s)" % (namespace, short_name)
        else:
            obj_name = "(%s)" % (namespace, )
        # Only warnings/errors become full Sentry messages.
        if level in ('warning', 'error') or event_type in ('error', ):
            if event.involved_object:
                meta['involved_object'] = {
                    k: v
                    for k, v in event.involved_object.to_dict().items()
                    if v is not None
                }
            fingerprint = []
            tags = {}
            if component:
                tags['component'] = component
            if reason:
                tags['reason'] = event.reason
                fingerprint.append(event.reason)
            if namespace:
                tags['namespace'] = namespace
                fingerprint.append(namespace)
            if short_name:
                tags['name'] = short_name
                fingerprint.append(short_name)
            if kind:
                tags['kind'] = kind
                fingerprint.append(kind)
            data = {
                'sdk': SDK_VALUE,
                'server_name': source_host or 'n/a',
                'culprit': "%s %s" % (obj_name, reason),
            }
            sentry.captureMessage(
                message,
                # culprit=culprit,
                data=data,
                date=creation_timestamp,
                extra=meta,
                fingerprint=fingerprint,
                level=level,
                tags=tags,
            )
        # Every event — regardless of level — is kept as a breadcrumb.
        data = {}
        if name:
            data['name'] = name
        if namespace:
            data['namespace'] = namespace
        breadcrumbs.record(
            data=data,
            level=level,
            message=message,
            timestamp=time.mktime(creation_timestamp.timetuple()),
        )
def watch_loop():
    """Stream Kubernetes events with namespace/component/reason filtering,
    sending matching levels to Sentry and every surviving event as a breadcrumb.

    NOTE(review): indentation reconstructed from a whitespace-mangled source;
    the nesting of the exclusion `continue`s is inferred — confirm against the
    original repository.
    """
    logging.info("Starting Kubernetes watcher")
    v1 = client.CoreV1Api()
    w = watch.Watch()
    logging.info("Initializing Sentry client")
    sentry = SentryClient(
        dsn=DSN,
        install_sys_hook=False,
        install_logging_hook=False,
        include_versions=False,
        capture_locals=False,
        context={},
        environment=ENV,
        release=RELEASE,
        transport=ThreadedRequestsHTTPTransport,
    )
    # try:
    #     resource_version = v1.list_event_for_all_namespaces().items[-1].metadata.resource_version
    # except:
    #     resource_version = 0
    # A single watched namespace can use the cheaper namespaced list call.
    if EVENT_NAMESPACES and len(EVENT_NAMESPACES) == 1:
        stream = w.stream(v1.list_namespaced_event, EVENT_NAMESPACES[0])
    else:
        stream = w.stream(v1.list_event_for_all_namespaces)
    for event in stream:
        logging.debug("event: %s" % event)
        event_type = event["type"].lower()
        event = event["object"]
        # Keep only populated metadata fields.
        meta = {
            k: v
            for k, v in event.metadata.to_dict().items() if v is not None
        }
        creation_timestamp = meta.pop("creation_timestamp", None)
        level = event.type and event.type.lower()
        level = LEVEL_MAPPING.get(level, level)
        component = source_host = reason = namespace = name = short_name = kind = None
        if event.source:
            source = event.source.to_dict()
            if "component" in source:
                component = source["component"]
                # Drop events from explicitly excluded components.
                if COMPONENTS_EXCLUDED and component in COMPONENTS_EXCLUDED:
                    continue
            if "host" in source:
                source_host = source["host"]
        if event.reason:
            reason = event.reason
            # Drop events with explicitly excluded reasons.
            if REASONS_EXCLUDED and reason in REASONS_EXCLUDED:
                continue
        if event.involved_object and event.involved_object.namespace:
            namespace = event.involved_object.namespace
        elif "namespace" in meta:
            namespace = meta["namespace"]
        # Namespace allow-list / deny-list filtering.
        if namespace and EVENT_NAMESPACES and namespace not in EVENT_NAMESPACES:
            continue
        if namespace and EVENT_NAMESPACES_EXCLUDED and namespace in EVENT_NAMESPACES_EXCLUDED:
            continue
        if event.involved_object and event.involved_object.kind:
            kind = event.involved_object.kind
        if event.involved_object and event.involved_object.name:
            name = event.involved_object.name
            # Strip generated suffixes (e.g. replicaset/pod hashes) so that
            # events for the same workload group together.
            if not MANGLE_NAMES or kind in MANGLE_NAMES:
                bits = name.split("-")
                if len(bits) in (1, 2):
                    short_name = bits[0]
                else:
                    short_name = "-".join(bits[:-2])
            else:
                short_name = name
        message = event.message
        if namespace and short_name:
            obj_name = "(%s/%s)" % (namespace, short_name)
        else:
            obj_name = "(%s)" % (namespace, )
        # Only configured levels become full Sentry messages.
        if level in EVENT_LEVELS or event_type in ("error", ):
            if event.involved_object:
                meta["involved_object"] = {
                    k: v
                    for k, v in event.involved_object.to_dict().items()
                    if v is not None
                }
            fingerprint = []
            tags = {}
            if CLUSTER_NAME:
                tags["cluster"] = CLUSTER_NAME
            if component:
                tags["component"] = component
            if reason:
                tags["reason"] = event.reason
                fingerprint.append(event.reason)
            if namespace:
                tags["namespace"] = namespace
                fingerprint.append(namespace)
            if short_name:
                tags["name"] = short_name
                fingerprint.append(short_name)
            if kind:
                tags["kind"] = kind
                fingerprint.append(kind)
            data = {
                "sdk": SDK_VALUE,
                "server_name": source_host or "n/a",
                "culprit": "%s %s" % (obj_name, reason),
            }
            logging.debug("Sending event to Sentry:\n{}".format(data))
            sentry.captureMessage(
                message,
                # culprit=culprit,
                data=data,
                date=creation_timestamp,
                extra=meta,
                fingerprint=fingerprint,
                level=level,
                tags=tags,
            )
        # Every surviving event — regardless of level — becomes a breadcrumb.
        data = {}
        if name:
            data["name"] = name
        if namespace:
            data["namespace"] = namespace
        breadcrumbs.record(
            data=data,
            level=level,
            message=message,
            timestamp=time.mktime(creation_timestamp.timetuple()),
        )