def test_error():
    """Exercise Options.set error handling.

    Unknown keys raise RuntimeError (suppressed when fail_on_error=False),
    and bad value types raise TypeError whose message includes progressively
    more context: the key, then the file, then the section.
    """
    with pytest.raises(RuntimeError):
        Options.set("no key", 42)
    with configure_scope() as scope:
        # Disable event capture so the soft failure below is not reported.
        scope._should_capture = False
        Options.set("no key", 42, fail_on_error=False)
    with pytest.raises(TypeError) as err:
        Options.set("delay", "foo")
    msg = err.value.args[0]
    assert "delay" in msg
    assert "test.ini" not in msg
    assert "debugging" not in msg
    # Test the 'file' arg
    with pytest.raises(TypeError) as err:
        Options.set("delay", "foo", file="test.ini")
    msg = err.value.args[0]
    assert "delay" in msg
    assert "test.ini" in msg
    assert "debugging" not in msg
    # Test `file` and `section` args
    with pytest.raises(TypeError) as err:
        Options.set("delay", "foo", file="test.ini", section="debugging")
    msg = err.value.args[0]
    assert "delay" in msg
    assert "test.ini" in msg
    assert "debugging" in msg
def __init__(self):
    """Connect to MongoDB and Redis, ensure the stations index exists, and
    initialise logging plus Sentry tagging for this provider."""
    uri = uri_parser.parse_uri(MONGODB_URL)
    # Use the first host/port pair from the parsed connection URI.
    client = MongoClient(uri['nodelist'][0][0], uri['nodelist'][0][1])
    self.mongo_db = client[uri['database']]
    self.__stations_collection = self.mongo_db.stations
    # Compound index supporting geospatial queries filtered by these fields.
    self.__stations_collection.create_index([
        ('loc', GEOSPHERE),
        ('status', ASCENDING),
        ('pv-code', ASCENDING),
        ('short', ASCENDING),
        ('name', ASCENDING)])
    self.collection_names = self.mongo_db.collection_names()
    self.redis = redis.StrictRedis.from_url(url=REDIS_URL, decode_responses=True)
    self.google_api_key = GOOGLE_API_KEY
    self.log = get_logger(self.provider_code)
    sentry_sdk.init(SENTRY_URL)
    # Tag every Sentry event from this process with the provider name.
    with sentry_sdk.configure_scope() as scope:
        scope.set_tag('provider', self.provider_name)
def process_request(self, request):
    """Django middleware hook: attach span/transaction/request ids as
    Sentry tags and bind the request id to the structured logger."""
    span_id = six.text_type(uuid1())
    # Propagate an incoming transaction id when present, else start one.
    transaction_id = request.META.get('HTTP_X_TRANSACTION_ID') or six.text_type(uuid1())
    request_id = request.META.get('HTTP_X_REQUEST_ID')
    with sentry_sdk.configure_scope() as scope:
        scope.set_tag('span_id', span_id)
        scope.set_tag('transaction_id', transaction_id)
        if request_id is not None:
            scope.set_tag('request_id', request_id)
            logging.bind('sentry', request_id=request_id)
        else:
            # Need to be explicitly unbound when not set
            # otherwise it'll carry onto future requests
            logging.unbind('sentry', 'request_id')
def authentication_callback(user_id, request):
    """This is how pyramid knows the user's permissions.

    Records the user id on the DB connection, reports the user and
    discussion to Sentry, and returns the user's roles for the discussion.
    """
    connection = User.default_db.connection()
    # Stash the user id on the connection for downstream auditing use.
    connection.info['userid'] = user_id
    discussion = discussion_from_request(request)
    discussion_id = discussion.id if discussion else None
    # this is a good time to tell sentry about the user
    from sentry_sdk import configure_scope
    with configure_scope() as scope:
        if user_id:
            scope.user = {"id": str(user_id)}
        if discussion_id:
            scope.set_tag('discussion_id', discussion_id)
    return get_roles(user_id, discussion_id)
def dispatch(self, email, attrs):
    """POST a triggered-send request for *email* to the ExactTarget API.

    Raises TriggeredSendException whenever the reply reports errors.
    Error codes listed in ``settings.ET_IGNORED_ERROR_CODES`` are logged
    instead of being captured by Sentry (the exception is still raised).
    Returns the parsed JSON reply on success.
    """
    url = '%s/key:%s/send' % (self.url_base, self.external_key)
    headers = self.token.headers()
    req_data = {
        "To": {
            "Address": email,
            "SubscriberKey": email,
            "ContactAttributes": {"SubscriberAttributes": attrs},
        },
        "OPTIONS": {"RequestType": "SYNC"},
    }
    resp = requests.post(url, headers=headers, json=req_data)
    resp.raise_for_status()
    reply = resp.json()
    # Keep the raw reply on the Sentry scope for any report below.
    with sentry_sdk.configure_scope() as scope:
        scope.set_extra("reply_data", reply)
    for response in reply['responses']:
        if response['hasErrors']:
            error_messages = response.get('messageErrors', [])
            # set of error codes in messages
            error_codes = {err.get('messageErrorCode') for err in error_messages}
            # coerce ET_IGNORED_ERROR_CODES into set
            from . import settings
            codes_to_ignore = set(settings.ET_IGNORED_ERROR_CODES)
            # if response has any ignorable codes... set intersection
            if codes_to_ignore & error_codes:
                # log error, but don't send ignored error codes to Sentry
                # (was logger.warn — deprecated alias of logger.warning)
                logger.warning("Suppressed exception for ET API exception. Error response: {}".format(reply))
            else:
                sentry_sdk.capture_message('Error occurred while submitting subscriber to exact target')
            reg_messages = response.get('messages')
            # sometimes there are structured errors
            if error_messages:
                raise exceptions.TriggeredSendException(error_messages[0]['messageErrorStatus'])
            # sometimes there are plain text errors
            elif reg_messages:
                raise exceptions.TriggeredSendException(reg_messages[0])
            else:
                raise exceptions.TriggeredSendException('Unknown TriggeredSend Error Occurred')
    return reply
def run(self, result=None):
    """
    I could not (yet?) migrate this method to pytest because I did not
    find a way to make tests pass. We need to start each test in a thread
    and call self.app.exec_() to let signals transit in the QApplication.
    """
    log.info("TEST run start")

    def launch_test():
        # Note: we cannot use super().run(result) here
        super(TwoUsersTest, self).run(result)
        # Best-effort quit; the app may already be gone.
        with suppress(Exception):
            self.app.quit()

    # Ensure to kill the app if it is taking too long.
    # We need to do that because sometimes a thread get blocked and so the test suite.
    # Here, we set the timeout to 00:02:00, let's see if a higher value is needed.
    timeout = 2 * 60

    def kill_test():
        log.error(f"Killing {self.id()} after {timeout} seconds")
        self.app.quit()

    QTimer.singleShot(timeout * 1000, kill_test)

    # Start the app and let signals transit between threads!
    sync_thread = Thread(target=launch_test)
    with configure_scope() as scope:
        # Tag Sentry events with the test currently running.
        scope.set_tag("test", self.current_test)
        sync_thread.start()
        self.app.exec_()
        sync_thread.join(30)

    log.info("TEST run end")
def _handle_consume_exception(self, events: List[Dict[str, Any]], exception: Exception) -> None:
    """Log a failed batch of queue events and persist them to the
    queue's error file for later inspection/replay."""
    # Attach the offending events to any Sentry report for this exception.
    with configure_scope() as scope:
        scope.set_context("events", {
            "data": events,
            "queue_name": self.queue_name,
        })
    if isinstance(exception, WorkerTimeoutException):
        logging.exception("%s in queue %s", str(exception), self.queue_name, stack_info=True)
    else:
        logging.exception("Problem handling data on queue %s", self.queue_name, stack_info=True)
    if not os.path.exists(settings.QUEUE_ERROR_DIR):
        os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
    # Use 'mark_sanitized' to prevent Pysa from detecting this false positive
    # flow. 'queue_name' is always a constant string.
    fname = mark_sanitized(f'{self.queue_name}.errors')
    fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
    line = f'{time.asctime()}\t{orjson.dumps(events).decode()}\n'
    lock_fn = fn + '.lock'
    # Serialize writers across processes via a lock file.
    with lockfile(lock_fn):
        with open(fn, 'ab') as f:
            f.write(line.encode('utf-8'))
    check_and_send_restart_signal()
def scrape(event, context):
    """Lambda handler: run every registered scraper, reporting failures
    to Sentry without aborting the remaining scrapers."""
    for scraper in SCRAPERS:
        scraper_name = scraper.__name__
        # Tag subsequent Sentry events with the scraper being run.
        with sentry_sdk.configure_scope() as scope:
            scope.set_tag("scraper", scraper_name)
        try:
            scraper()
            now = datetime.datetime.now()
            print(f'Updated {scraper_name} at {now}')
        except Exception as e:
            # Catch and send error to Sentry manually so we can continue
            # running other scrapers if one fails
            print(f'Scraper {scraper_name} failed with {e}')
            print(e)
            sentry_sdk.capture_exception(e)
    body = {
        "message": f"Ran {len(SCRAPERS)} scrapers successfully.",
    }
    response = {"statusCode": 200, "body": json.dumps(body)}
    return response
def get_stacktrace_link(self, repo, filepath, default, version):
    """
    Handle formatting and returning back the stack trace link if the client
    request was successful.

    Uses the version first, and re-tries with the default branch if we 404
    trying to use the version (commit sha).

    If no file was found return `None`, and re-raise for non "Not Found"
    errors
    """
    with configure_scope() as scope:
        # These tags let us measure how often the exact commit sha works
        # versus falling back to the default branch.
        scope.set_tag("stacktrace_link.tried_version", False)
        if version:
            scope.set_tag("stacktrace_link.tried_version", True)
            source_url = self.check_file(repo, filepath, version)
            if source_url:
                scope.set_tag("stacktrace_link.used_version", True)
                return source_url
        scope.set_tag("stacktrace_link.used_version", False)
        # Fall back to the default branch.
        source_url = self.check_file(repo, filepath, default)
        return source_url
def test_no_stackoverflows(celery):
    """We used to have a bug in the Celery integration where its
    monkeypatching was repeated for every task invocation, leading to
    stackoverflows.

    See https://github.com/getsentry/sentry-python/issues/265
    """
    results = []

    @celery.task(name="dummy_task")
    def dummy_task():
        with configure_scope() as scope:
            scope.set_tag("foo", "bar")
        results.append(42)

    for _ in range(10000):
        dummy_task.delay()

    assert results == [42] * 10000

    with configure_scope() as scope:
        # The task-local tag must not leak back into the outer scope.
        assert not scope._tags
def __init__(self, options: Dict[str, Any]):
    """Initialise the Sentry reporting backend.

    Consumes backend-specific keys from *options* (``provider``,
    ``enable``, ``available``, ``token``, ``capture_logs``,
    ``ignored_loggers``), configures log capturing, then forwards the
    remaining options verbatim to ``sentry_sdk.init``.
    """
    del options['provider']
    # These keys are for the caller's bookkeeping, not for sentry_sdk.
    options.pop('enable', None)
    options.pop('available', None)
    token = options.pop('token')
    integrations = []
    if not options.get('capture_logs'):
        # BUG FIX: was ``level=logging.debug`` — that passes the *function*
        # logging.debug; LoggingIntegration expects the numeric level
        # constant logging.DEBUG (breadcrumbs from DEBUG up, events from
        # ERROR up).
        integrations.append(
            LoggingIntegration(level=logging.DEBUG, event_level=logging.ERROR))
    options.pop('capture_logs', None)
    # Silence loggers the caller asked to ignore.
    for logger in options.pop('ignored_loggers', []):
        ignore_logger(logger)
    sentry_sdk.init(dsn=token, integrations=integrations, **options)
    # Attach a stable per-device user id to all events.
    with sentry_sdk.configure_scope() as scope:
        scope.user = {'id': get_device_id()}
async def _on_error(context: TurnContext, error: Exception):
    """Global bot error handler: report to Sentry, log the traceback,
    and send a generic error message back to the user."""
    # noinspection PyBroadException
    try:
        user_id = f"{context.activity.channel_id}/{context.activity.from_property.id}"
        username = context.activity.from_property.name
    except Exception:
        # Activity may lack sender info; report anonymously.
        user_id = None
        username = None
    # Send the exception to sentry
    with configure_scope() as scope:
        scope.user = {"id": user_id, "username": username}
        scope.set_extra("channel", context.activity.channel_id)
        scope.set_extra("message", context.activity.text)
        capture_exception(error)
    # Log the error
    print(f"\n [on_turn_error] unhandled error: {error}", file=sys.stderr)
    traceback.print_exc()
    # Send a message to the user
    await context.send_activity(ResponseMsgs.get('error'))
def start(self, deamon=True, port=None, debug=False):
    """Start the Naas runner web server.

    Only the configured shell user may start it. Optionally overrides the
    listening port and wires up Sentry (when NAAS_SENTRY_DSN is set)
    before launching the Sanic main loop.
    """
    user = getpass.getuser()
    if user != self.__shell_user:
        raise ValueError(
            f"{user} not autorized, use {self.__shell_user} instead")
    if port:
        self.__port = port
    print("Start Runner", __version__)
    try:
        if os.environ.get("NAAS_SENTRY_DSN"):
            sentry_sdk.init(
                dsn=os.environ.get("NAAS_SENTRY_DSN"),
                traces_sample_rate=1.0,
                environment=escape_kubernet(self.__user),
                integrations=[SanicIntegration()],
            )
            sentry_sdk.set_user({"email": self.__user})
            # Record the runner version on every Sentry event.
            with sentry_sdk.configure_scope() as scope:
                scope.set_context("Naas", {"version": __version__})
        self.__main(debug)
    except KeyboardInterrupt:
        print("Shutdown server")
        sys.exit()
def setup_sentry(hs):
    """Enable sentry integration, if enabled in configuration

    Args:
        hs (synapse.server.HomeServer)
    """
    if not hs.config.sentry_enabled:
        return

    import sentry_sdk
    sentry_sdk.init(dsn=hs.config.sentry_dsn, release=get_version_string(synapse))

    # We set some default tags that give some context to this instance
    with sentry_sdk.configure_scope() as scope:
        scope.set_tag("matrix_server_name", hs.config.server_name)

        # Distinguish worker processes from the master process.
        app = hs.config.worker_app if hs.config.worker_app else "synapse.app.homeserver"
        name = hs.config.worker_name if hs.config.worker_name else "master"
        scope.set_tag("worker_app", app)
        scope.set_tag("worker_name", name)
def subhub_post(request):
    """Handle a SubHub webhook POST: authenticate, parse the JSON body,
    and queue the matching event processor. Always returns JSON."""
    if not has_valid_api_key(request):
        return HttpResponseJSON(
            {
                'status': 'error',
                'desc': 'requires a valid API-key',
                'code': errors.BASKET_AUTH_ERROR,
            }, 401)
    try:
        data = json.loads(request.body)
    except ValueError:
        statsd.incr('subhub_post.message.json_error')
        # Include the unparseable body in the Sentry report.
        with sentry_sdk.configure_scope() as scope:
            scope.set_extra('request.body', request.body)
            sentry_sdk.capture_exception()
        return HttpResponseJSON(
            {
                'status': 'error',
                'desc': 'JSON error',
                'code': errors.BASKET_USAGE_ERROR,
            }, 400)
    else:
        etype = data['event_type']
        processor = SUBHUB_EVENT_TYPES.get(etype)
        if processor:
            # Process asynchronously via the task queue.
            processor.delay(data)
            return HttpResponseJSON({'status': 'ok'})
        else:
            return HttpResponseJSON(
                {
                    'desc': 'unknown event type',
                    'status': 'error',
                    'code': errors.BASKET_USAGE_ERROR
                }, 400)
async def connect(self):
    """Websocket connect handler: join the version group, resolve the
    world from the URL route, and initialise per-connection modules."""
    self.content = []
    self.conn_time = time.time()
    await self.accept()
    await self.channel_layer.group_add(
        GROUP_VERSION.format(label=settings.VENUELESS_COMMIT + "." + settings.VENUELESS_ENVIRONMENT),
        self.channel_name,
    )
    await register_connection()
    try:
        self.world = await get_world(
            self.scope["url_route"]["kwargs"]["world"])
    except OperationalError:
        # We use connection pooling, so if the database server went away since the last connection
        # terminated, Django won't know and we'll get an OperationalError. We just silently re-try
        # once, since Django will then use a new connection.
        self.world = await get_world(
            self.scope["url_route"]["kwargs"]["world"])
    if self.world is None:
        await self.send_error("world.unknown_world", close=True)
        return
    if settings.SENTRY_DSN:
        # Record which world this connection belongs to on Sentry events.
        with configure_scope() as scope:
            scope.set_extra("world", self.world.id)
    self.components = {
        "chat": ChatModule(self),
        "user": AuthModule(self),
        "bbb": BBBModule(self),
        "room": RoomModule(self),
        "exhibition": ExhibitionModule(self),
        "world": WorldModule(self),
    }
def post(self, request, installation):
    """Exchange an authorization code, or refresh a token, for the given
    sentry-app *installation*; returns the serialized API token."""
    # Tag Sentry events with the installation being serviced.
    with sentry_sdk.configure_scope() as scope:
        scope.set_tag("organization", installation.organization_id)
        scope.set_tag("sentry_app_id", installation.sentry_app_id)
        scope.set_tag("sentry_app_slug", installation.sentry_app.slug)
    try:
        if request.json_body.get("grant_type") == GrantTypes.AUTHORIZATION:
            token = GrantExchanger.run(
                install=installation,
                code=request.json_body.get("code"),
                client_id=request.json_body.get("client_id"),
                user=request.user,
            )
        elif request.json_body.get("grant_type") == GrantTypes.REFRESH:
            token = Refresher.run(
                install=installation,
                refresh_token=request.json_body.get("refresh_token"),
                client_id=request.json_body.get("client_id"),
                user=request.user,
            )
        else:
            return Response({"error": "Invalid grant_type"}, status=403)
    except APIUnauthorized as e:
        logger.warning(e, exc_info=True)
        return Response({"error": e.msg or "Unauthorized"}, status=403)
    attrs = {
        "state": request.json_body.get("state"),
        "application": None
    }
    body = ApiTokenSerializer().serialize(token, attrs, request.user)
    return Response(body, status=201)
def send_message(self, message):
    """Forward a monitoring notification *message* to Sentry as an event,
    with tags and extra context derived from the message."""
    sentry_client = Client(dsn=self.sentry_dsn, environment=self.environment)
    with configure_scope() as scope:
        # Indexed, searchable tags derived from the message.
        tags = self.get_tags(message)
        for key, val in tags.items():
            scope.set_tag(key, val)
        # Non-indexed extra context about the job and its monitors.
        scope.set_extra("job_link", message.get("job_link", ""))
        scope.set_extra("spider_name", message.get("spider_name", ""))
        scope.set_extra("items_count", message.get("items_count", 0))
        scope.set_extra("passed_monitors_count", message.get("passed_monitors_count", 0))
        scope.set_extra("failed_monitors_count", message.get("failed_monitors_count", 0))
        scope.set_extra("failed_monitors", message.get("failed_monitors", []))
        sentry_client.capture_event(
            {
                "message": "{title} \n {description}".format(
                    title=message.get("title"),
                    description=message.get("failure_reasons", ""),
                ),
                "level": self.sentry_log_level,
            },
            scope=scope,
        )
    logger.info("Notification sent to the sentry dashboard!!")
    sentry_client.close()
def register_extensions(app):
    """Wire Flask extensions, CORS, Celery config, and Sentry onto *app*."""
    db.init_app(app)
    executor.init_app(app)
    basic_auth.init_app(app)

    @app.before_request
    def enable_form_raw_cache():
        # Workaround to allow unparsed request body to be read from cache
        # This is required to validate a signature on webhooks
        # This MUST go before Sentry integration as sentry triggers form parsing
        if not config.IS_TEST and (
                request.path.startswith('/api/v1/slack/') or
                request.path.startswith('/api/v1/poli_payments_webhook/')):
            if request.content_length > 1024 * 1024:  # 1mb
                # Payload too large
                return make_response(jsonify({'message': 'Payload too large'})), 413
            request.get_data(parse_form_data=False, cache=True)

    # limiter.init_app(app)
    CORS(app, resources={r"/api/*": {"origins": "*"}})
    celery_app.conf.update(app.config)

    if not config.IS_TEST:
        sentry_sdk.init(
            app.config['SENTRY_SERVER_DSN'],
            integrations=[
                FlaskIntegration(),
                SqlalchemyIntegration(),
                RedisIntegration()
            ],
            release=config.VERSION)
        # Tag every Sentry event with the serving domain.
        with configure_scope() as scope:
            scope.set_tag("domain", config.APP_HOST)

    print('celery joined on {} at {}'.format(app.config['REDIS_URL'], datetime.utcnow()))
def sync_feed(feed: Feed):
    """Sync feed parsing against the database."""
    with configure_scope() as scope:
        logger.debug(f"Processing feed {feed.link}")
        scope.set_tag("feed", feed.title)
        r = requests.get(feed.link, headers={"User-Agent": REQUESTS_USER_AGENT})
        # Keep the raw body on the Sentry scope for any report below.
        scope.set_extra("body", r.text)
        if r.status_code != 200:
            logger.error(f"{r.status_code} received when scraping {feed.link}", exc_info=True)
            # Record the failure so the feed's history shows the bad response.
            IngestLog.objects.create(feed=feed, state=IngestLog.STATE_NOT_RESPONDING, body=r.text)
            return
        parser = RSSParser(feed)
        parser.parse(r)
        # Remember when we last successfully scraped this feed.
        feed.date_last_scraped = maya.now().datetime()
        feed.save()
async def new_func(*def_args, **def_kwargs):
    """Wrapped handler: records the update on the Sentry scope, optionally
    times execution in debug mode, then raises SkipHandler so dispatch
    continues to the next handler."""
    message = def_args[0]
    if cmds:
        message.conf["cmds"] = cmds
    if allow_kwargs is False:
        # Drop keyword arguments when the handler opted out of them.
        def_kwargs = dict()
    with configure_scope() as scope:
        # Attach the parsed update so any Sentry report has full context.
        parsed_update = parse_update(dict(message))
        scope.set_extra("update", str(parsed_update))
    if DEBUG_MODE:
        # log.debug('[*] Starting {}.'.format(func.__name__))
        # log.debug('Event: \n' + str(message))
        start = time.time()
        await func(*def_args, **def_kwargs)
        log.debug("[*] {} Time: {} sec.".format(
            func.__name__, time.time() - start))
    else:
        await func(*def_args, **def_kwargs)
    raise SkipHandler()
def setup_sentry(hs):
    """Enable sentry integration, if enabled in configuration

    Args:
        hs (synapse.server.HomeServer)
    """
    if not hs.config.sentry_enabled:
        return

    import sentry_sdk

    sentry_sdk.init(
        dsn=hs.config.sentry_dsn,
        release=get_version_string(synapse),
    )

    # Tag every event with enough context to identify this instance.
    with sentry_sdk.configure_scope() as scope:
        scope.set_tag("matrix_server_name", hs.config.server_name)

        worker_app = hs.config.worker_app or "synapse.app.homeserver"
        worker_name = hs.config.worker_name or "master"
        scope.set_tag("worker_app", worker_app)
        scope.set_tag("worker_name", worker_name)
def write_error(self, status_code, exc_info):
    """Tornado error hook: shape the response body by exception type and
    report the exception to Sentry."""
    etype, exc, _ = exc_info
    finish_args = []
    with sentry_sdk.configure_scope() as scope:
        if issubclass(etype, exceptions.WaterButlerError):
            if exc.is_user_error:
                # User mistakes are informational, not errors, in Sentry.
                scope.level = 'info'
            self.set_status(int(exc.code))
            finish_args = [exc.data] if exc.data else [{
                'code': exc.code,
                'message': exc.message
            }]
        elif issubclass(etype, tasks.WaitTimeOutError):
            # Long-running task still in progress: reply 202 Accepted.
            self.set_status(202)
            scope.level = 'info'
        else:
            finish_args = [{'code': status_code, 'message': self._reason}]
        sentry_sdk.capture_exception(exc_info)
    self.finish(*finish_args)
def sentry_init(obs_apiurl=None, tags=None):
    """Initialise sentry_sdk if importable, falling back to no-op dummies.

    Stores the client on the function attribute ``sentry_init.client`` and
    returns the (real or dummy) sentry_sdk module.
    """
    try:
        import sentry_sdk
    except ImportError:
        # sentry_sdk not installed: install inert stand-ins so callers can
        # use the same API unconditionally.
        sentry_init.client = sentry_client_dummy()
        return sentry_sdk_dummy()

    sentry_init.client = sentry_sdk.init(
        conf.config.get('sentry_sdk.dsn'),
        environment=conf.config.get('sentry_sdk.environment'),
        release=VERSION)

    with sentry_sdk.configure_scope() as scope:
        scope.set_tag('osc', core.__version__)
        if obs_apiurl:
            scope.set_tag('obs_apiurl', obs_apiurl)
            scope.user = {'username': conf.get_apiurl_usr(obs_apiurl)}
        if tags:
            # Caller-supplied extra tags.
            for key, value in tags.items():
                scope.set_tag(key, value)

    return sentry_sdk
def handle(data):
    """Validate *data*, build a receipt PDF, and mail it.

    Returns a ``(message, http_status)`` tuple; user-facing messages are
    in Norwegian.
    """
    # Add some info about the user to the scope
    with configure_scope() as scope:
        scope.user = {
            "name": data["name"],
            "mailfrom": data["mailfrom"],
            "mailto": data["mailto"],
        }
    req_fields = data_is_valid(data)
    if len(req_fields) > 0:
        return f'Requires fields {", ".join(req_fields)}', 400
    try:
        data = modify_data(data)
    except UnsupportedFileException as e:
        logging.error(f"Unsupported file type: {e}")
        return (
            "En av filene som ble lastet opp er ikke i støttet format. Bruk PNG, JPEG, GIF, HEIC eller PDF",
            400,
        )
    try:
        file = create_pdf(data)
        # Send the receipt to both the recipient and the sender.
        mail.send_mail([data["mailto"], data["mailfrom"]], data, file)
    except RuntimeError as e:
        logging.warning(f"Failed to generate pdf with exception: {e}")
        return f"Klarte ikke å generere pdf: {e}", 500
    except mail.MailConfigurationException as e:
        logging.warning(f"Failed to send mail: {e}")
        return f"Klarte ikke å sende email: {e}", 500
    except Exception as e:
        logging.error(f"Failed with exception: {e}")
        return f"Noe uventet skjedde: {e}", 400
    logging.info("Successfully generated pdf and sent mail")
    return "Kvitteringsskjema generert og sendt på mail!", 200
def post(self, request, *_, **__):
    """Record submitted round results and start the next round, finishing
    the tournament if the next round cannot be paired."""
    # Identify the acting user on Sentry events.
    with configure_scope() as scope:
        scope.user = {
            "username": self.request.user.username,
            "email": self.request.user.email,
        }
    self.object = self.get_object()
    form = forms.RoundForm(request.POST, round=self.object.current_round)
    if form.is_valid():
        for player_1_performance, player_2_performance in form.results():
            current_round = self.object.current_round
            duel = current_round.get_duel_for_players(
                player_1_performance.player, player_2_performance.player)
            duel.set_player_performance(player_1_performance)
            duel.set_player_performance(player_2_performance)
        try:
            self.object.start_next_round()
        except AssertionError as error:
            # Pairing failed: log it, warn the user, and end the tournament.
            import logging
            logger = logging.getLogger(__name__)
            logger.error('Error when pairing', exc_info=error)
            messages.warning(
                request,
                "Couldn't pair all players for next round. Finished this tournament."
            )
            self.object.finish()
            return HttpResponseRedirect('#finished')
        return HttpResponseRedirect('#worked')
    return self.render_to_response(context=self.get_context_data(
        round_form=form))
def add_to_salsa(self, user):
    """Subscribe *user* to the Salsa mailing list, reporting API errors
    to Sentry with the full response payload attached."""
    params = {
        'zipcode': user.userzipcode.zipcode,
        'first_name': user.first_name,
        'last_name': user.last_name,
        'email': user.email,
    }
    payload = settings.SALSA_PAYLOAD % params
    signup = requests.post(
        settings.SALSA_URL,
        data=payload,
        headers={'content-type': 'application/json; charset=utf-8'})
    # NOTE(review): any status > 200 (including 201/202) is treated as a
    # failure — presumably Salsa always replies 200 on success; confirm.
    if signup.status_code > 200 or signup.json().get('errors'):
        with configure_scope() as scope:
            scope.user = {'id': user.id, 'email': user.email}
            for key, value in signup.json().items():
                scope.set_extra(key, value)
            capture_message(
                'Salsa returned an error while capturing email "{}"'.
                format(user.email))
def __init__(self, run_id=None, mode=None, dir=None, group=None,
             job_type=None, config=None, sweep_id=None, storage_id=None,
             description=None, resume=None, program=None, args=None,
             wandb_dir=None, tags=None, name=None, notes=None, api=None):
    """Create a Run.

    Arguments:
        description (str): This is the old, deprecated style of description:
            the run's name followed by a newline, followed by multiline notes.
    """
    # self.storage_id is "id" in GQL.
    self.storage_id = storage_id
    # self.id is "name" in GQL.
    self.id = run_id if run_id else util.generate_id()
    # self._name is "display_name" in GQL.
    self._name = None
    self.notes = None

    self.resume = resume if resume else 'never'
    self.mode = mode if mode else 'run'
    self.group = group
    self.job_type = job_type
    self.pid = os.getpid()
    self.resumed = False  # we set resume when history is first accessed

    if api:
        # An Api object may only serve one run at a time.
        if api.current_run_id and api.current_run_id != self.id:
            raise RuntimeError(
                'Api object passed to run {} is already being used by run {}'
                .format(self.id, api.current_run_id))
        else:
            api.set_current_run_id(self.id)
    self._api = api

    if dir is None:
        self._dir = run_dir_path(self.id, dry=self.mode == 'dryrun')
    else:
        self._dir = os.path.abspath(dir)
    self._mkdir()

    # self.name and self.notes used to be combined into a single field.
    # Now if name and notes don't have their own values, we get them from
    # self._name_and_description, but we don't update description.md
    # if they're changed. This is to discourage relying on self.description
    # and self._name_and_description so that we can drop them later.
    #
    # This needs to be set before name and notes because name and notes may
    # influence it. They have higher precedence.
    self._name_and_description = None
    if description:
        wandb.termwarn(
            'Run.description is deprecated. Please use wandb.init(notes="long notes") instead.'
        )
        self._name_and_description = description
    elif os.path.exists(self.description_path):
        with open(self.description_path) as d_file:
            self._name_and_description = d_file.read()

    if name is not None:
        self.name = name
    if notes is not None:
        self.notes = notes

    self.program = program
    if not self.program:
        try:
            import __main__
            self.program = __main__.__file__
        except (ImportError, AttributeError):
            # probably `python -c`, an embedded interpreter or something
            self.program = '<python with no main file>'
    self.args = args
    if self.args is None:
        self.args = sys.argv[1:]
    self.wandb_dir = wandb_dir

    # Tag Sentry events with project/entity/url context for this run.
    with configure_scope() as scope:
        self.project = self.api.settings("project")
        scope.set_tag("project", self.project)
        scope.set_tag("entity", self.entity)
        try:
            scope.set_tag("url", self.get_url(self.api, network=False)
                          )  # TODO: Move this somewhere outside of init
        except CommError:
            pass

    if self.resume == "auto":
        # Persist the run id so an interrupted run can be resumed.
        util.mkdir_exists_ok(wandb.wandb_dir())
        resume_path = os.path.join(wandb.wandb_dir(), RESUME_FNAME)
        with open(resume_path, "w") as f:
            f.write(json.dumps({"run_id": self.id}))

    if config is None:
        self.config = Config()
    else:
        self.config = config

    # socket server, currently only available in headless mode
    self.socket = None

    self.tags = tags if tags else []

    self.sweep_id = sweep_id

    self._history = None
    self._events = None
    self._summary = None
    self._meta = None
    self._run_manager = None
    self._jupyter_agent = None
def get_list_transform(self, sd_responses):
    """return a transformed list from screendoor reponses

    Returns a list of flattened permit dicts, or False when *sd_responses*
    is not a list. Responses missing required fields are reported to
    Sentry rather than included.
    """
    permit_list = False
    responses_missing = []
    # Screendoor form field ids for the values we extract.
    sd_fields = {
        'activity': 'dd8a5g7g',
        'app_id': 'uqqrsogr',
        'biz_name': 't00kheyd',
        'dba_name': '60w4ep9y',
        'addr': 'kbqz4189',
        'parcel': 'kvrgbqrl'
    }
    if isinstance(sd_responses, list):
        permit_list = []
        for resp in sd_responses:
            # Only include responses with an activity, some business name,
            # and a status we know how to map.
            if (resp.get('responses', False)
                    and resp['responses'].get(sd_fields['activity'], False)
                    and (resp['responses'].get(sd_fields['biz_name'], False)
                         or resp['responses'].get(sd_fields['dba_name'], False))
                    and (resp.get('status', '') in self.status_map.keys())):
                resp_status = self.status_map[resp.get('status')].lower()
                resp_referred = self.get_referred_departments(
                    resp.get('labels'))
                item = {
                    'application_id': '',
                    'business_name': '',
                    'dba_name': '',
                    'address': '',
                    'parcel': '',
                    'status': resp_status,
                    'referred': ", ".join(resp_referred)
                }
                data = resp['responses']
                item['application_id'] = str(
                    data.get(sd_fields['app_id']) or '')
                if not data.get(sd_fields['app_id']):
                    # Fall back to a pseudo-id derived from the response id.
                    item['application_id'] = 'P-' + str(resp['id'])
                item['business_name'] = str(
                    data.get(sd_fields['biz_name']) or '')
                item['dba_name'] = str(
                    data.get(sd_fields['dba_name']) or item['business_name'])
                item['parcel'] = data.get(sd_fields['parcel'], '')
                if data.get(sd_fields['addr']) and data.get(
                        sd_fields['addr']).get('street'):
                    # Assemble a single display address, dropping empty parts.
                    addr = data.get(sd_fields['addr'])
                    item['address'] = str(addr.get('street') or '')
                    item['address'] += ', ' + str(addr.get('city') or '')
                    item['address'] += ', ' + str(addr.get('state') or '')
                    item['address'] += ' ' + str(addr.get('zipcode') or '')
                    item['address'] = item['address'].strip(' ,')
                if data[sd_fields['activity']] and data[
                        sd_fields['activity']]['checked']:
                    # One key per applied permit type, valued by status.
                    for applied_permit_type in data[
                            sd_fields['activity']]['checked']:
                        item[applied_permit_type.lower()] = resp_status
                permit_list.append(item)
            else:
                responses_missing.append({
                    'id': resp['id'],
                    'sequential_id': resp['sequential_id']
                })
        # Record outcome stats (and any skipped responses) on Sentry.
        with sentry_sdk.configure_scope() as scope:
            scope.set_extra('get_list_transform.permit_list_len',
                            len(permit_list))
            if responses_missing:
                scope.set_extra('get_list_transform.responses_missing',
                                responses_missing)
    return permit_list
def dummy_task():
    """Tag the current Sentry scope, then record a sentinel result."""
    with configure_scope() as current_scope:
        current_scope.set_tag("foo", "bar")
    results.append(42)
def _request(
    self,
    method,
    path,
    headers=None,
    data=None,
    params=None,
    auth=None,
    json=True,
    allow_text=None,
    allow_redirects=None,
    timeout=None,
):
    """Issue an HTTP request to the integration API inside a Sentry
    transaction, translating transport failures into Api* exceptions."""
    if allow_text is None:
        allow_text = self.allow_text
    if allow_redirects is None:
        allow_redirects = self.allow_redirects
    if allow_redirects is None:  # is still None
        # Default: only follow redirects for GET requests.
        allow_redirects = method.upper() == "GET"
    if timeout is None:
        timeout = 30
    full_url = self.build_url(path)
    metrics.incr(
        u"%s.http_request" % self.datadog_prefix,
        sample_rate=1.0,
        tags={self.integration_type: self.name},
    )
    # Link the outgoing request to the current trace, if one is active.
    try:
        with sentry_sdk.configure_scope() as scope:
            parent_span_id = scope.span.span_id
            trace_id = scope.span.trace_id
    except AttributeError:
        parent_span_id = None
        trace_id = None
    with sentry_sdk.start_transaction(
        op=u"{}.http".format(self.integration_type),
        name=u"{}.http_response.{}".format(self.integration_type, self.name),
        parent_span_id=parent_span_id,
        trace_id=trace_id,
        sampled=True,
    ) as span:
        try:
            with build_session() as session:
                resp = getattr(session, method.lower())(
                    url=full_url,
                    headers=headers,
                    json=data if json else None,
                    data=data if not json else None,
                    params=params,
                    auth=auth,
                    verify=self.verify_ssl,
                    allow_redirects=allow_redirects,
                    timeout=timeout,
                )
                resp.raise_for_status()
        except ConnectionError as e:
            self.track_response_data("connection_error", span, e)
            raise ApiHostError.from_exception(e)
        except Timeout as e:
            self.track_response_data("timeout", span, e)
            raise ApiTimeoutError.from_exception(e)
        except HTTPError as e:
            resp = e.response
            if resp is None:
                # No response object at all: log and surface a generic error.
                self.track_response_data("unknown", span, e)
                self.logger.exception("request.error", extra={
                    self.integration_type: self.name,
                    "url": full_url
                })
                raise ApiError("Internal Error", url=full_url)
            self.track_response_data(resp.status_code, span, e)
            raise ApiError.from_response(resp, url=full_url)
        self.track_response_data(resp.status_code, span, None, resp)
        if resp.status_code == 204:
            # 204 No Content: nothing to parse.
            return {}
        return BaseApiResponse.from_response(resp, allow_text=allow_text)
def tags_context(self, tags):
    """Copy every key/value pair in *tags* onto the current Sentry scope."""
    with sentry_sdk.configure_scope() as scope:
        for tag_name, tag_value in tags.items():
            scope.set_tag(tag_name, tag_value)
def init_sentry(self):
    """Add user-related scope information to sentry"""
    with configure_scope() as scope:  # type: ignore
        scope.user = {
            'username': self.username,
            'email': self.get_setting('email'),
        }
def _enable_errortracking():
    """Initialise Sentry error tracking from OctoPrint settings, exactly
    once per process (guarded by the module-level ``_enabled`` flag)."""
    # this is a bit hackish, but we want to enable error tracking as early in the platform lifecycle as possible
    # and hence can't wait until our implementation is initialized and injected with settings
    from octoprint.settings import settings

    global _enabled
    if _enabled:
        return

    version = get_octoprint_version_string()
    s = settings()
    plugin_defaults = dict(plugins=dict(errortracking=SETTINGS_DEFAULTS))

    enabled = s.getBoolean(["plugins", "errortracking", "enabled"],
                           defaults=plugin_defaults)
    enabled_unreleased = s.getBoolean(
        ["plugins", "errortracking", "enabled_unreleased"],
        defaults=plugin_defaults)
    url_server = s.get(["plugins", "errortracking", "url_server"],
                       defaults=plugin_defaults)
    unique_id = s.get(["plugins", "errortracking", "unique_id"],
                      defaults=plugin_defaults)
    if unique_id is None:
        # First run: generate and persist a stable anonymous install id.
        import uuid
        unique_id = str(uuid.uuid4())
        s.set(["plugins", "errortracking", "unique_id"], unique_id,
              defaults=plugin_defaults)
        s.save()

    if _is_enabled(enabled, enabled_unreleased):
        import sentry_sdk
        from octoprint.plugin import plugin_manager

        def _before_send(event, hint):
            # Filter hook: return None to drop an event, or the event to send.
            if not "exc_info" in hint:
                # we only want exceptions
                return None

            handled = True
            logger = event.get("logger", "")
            plugin = event.get("extra", dict()).get("plugin", None)

            for ignore in IGNORED_EXCEPTIONS:
                # Entries are either (exception_class, matcher) tuples or
                # bare exception classes.
                if isinstance(ignore, tuple):
                    ignored_exc, matcher = ignore
                else:
                    ignored_exc = ignore
                    matcher = lambda *args: True

                exc = hint["exc_info"][1]
                if isinstance(exc, ignored_exc) and matcher(exc, logger, plugin):
                    # exception ignored for logger
                    return None

                elif isinstance(ignore, type):
                    if isinstance(hint["exc_info"][1], ignore):
                        # exception ignored
                        return None

            if event.get("exception") and event["exception"].get("values"):
                # Handled unless any chained exception was unhandled.
                handled = not any(map(
                    lambda x: x.get("mechanism") and x["mechanism"].get("handled", True) == False,
                    event["exception"]["values"]))

            if handled:
                # error is handled, restrict further based on logger
                if logger != "" and not (logger.startswith("octoprint.")
                                         or logger.startswith("tornado.")):
                    # we only want errors logged by loggers octoprint.* or tornado.*
                    return None

                if logger.startswith("octoprint.plugins."):
                    plugin_id = logger.split(".")[2]
                    plugin_info = plugin_manager().get_plugin_info(plugin_id)
                    if plugin_info is None or not plugin_info.bundled:
                        # we only want our active bundled plugins
                        return None

                if plugin is not None:
                    plugin_info = plugin_manager().get_plugin_info(plugin)
                    if plugin_info is None or not plugin_info.bundled:
                        # we only want our active bundled plugins
                        return None

            return event

        sentry_sdk.init(url_server, release=version, before_send=_before_send)

        # Identify the install anonymously via the persisted unique id.
        with sentry_sdk.configure_scope() as scope:
            scope.user = dict(id=unique_id)

        logging.getLogger("octoprint.plugins.errortracking").info(
            "Initialized error tracking")
        _enabled = True
def main():
    """Entry point: set up tracking/logging, build and run the fMRIPrep workflow.

    Parses CLI options, optionally initializes Sentry error tracking with a
    crash-noise filter, validates the BIDS input and FreeSurfer license,
    builds the workflow in a forked process, runs it, and always generates
    reports. Exits with a non-zero status on any failure.
    """
    from nipype import logging as nlogging
    from multiprocessing import set_start_method, Process, Manager
    from ..viz.reports import generate_reports
    from ..utils.bids import write_derivative_description
    set_start_method('forkserver')
    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args()

    exec_env = os.name

    # special variable set in the container
    if os.getenv('IS_DOCKER_8395080871'):
        exec_env = 'singularity'
        cgroup = Path('/proc/1/cgroup')
        if cgroup.exists() and 'docker' in cgroup.read_text():
            exec_env = 'docker'
            if os.getenv('DOCKER_VERSION_8395080871'):
                exec_env = 'fmriprep-docker'

    # sentry_sdk stays None when tracking is disabled; the local import below
    # rebinds it only in the opted-in case
    sentry_sdk = None
    if not opts.notrack:
        import sentry_sdk
        from ..__about__ import __version__
        environment = "prod"
        release = __version__
        if not __version__:
            environment = "dev"
            release = "dev"
        elif bool(int(os.getenv('FMRIPREP_DEV', 0))) or ('+' in __version__):
            environment = "dev"

        def before_send(event, hints):
            # Filtering log messages about crashed nodes
            if 'logentry' in event and 'message' in event['logentry']:
                msg = event['logentry']['message']
                if msg.startswith("could not run node:"):
                    return None
                elif msg.startswith("Saving crash info to "):
                    return None
                elif re.match("Node .+ failed to run on host .+", msg):
                    return None

            # Propagate certain well-known failure breadcrumbs into the event
            # fingerprint so sentry groups them together
            if 'breadcrumbs' in event and isinstance(event['breadcrumbs'], list):
                fingerprints_to_propagate = ['no-disk-space', 'memory-error',
                                             'permission-denied', 'keyboard-interrupt']
                for bc in event['breadcrumbs']:
                    msg = bc.get('message', 'empty-msg')
                    if msg in fingerprints_to_propagate:
                        event['fingerprint'] = [msg]
                        break
            return event

        sentry_sdk.init("https://[email protected]/1137693",
                        release=release,
                        environment=environment,
                        before_send=before_send)
        with sentry_sdk.configure_scope() as scope:
            scope.set_tag('exec_env', exec_env)
            if exec_env == 'fmriprep-docker':
                scope.set_tag('docker_version', os.getenv('DOCKER_VERSION_8395080871'))
            free_mem_at_start = round(psutil.virtual_memory().free / 1024**3, 1)
            scope.set_tag('free_mem_at_start', free_mem_at_start)
            scope.set_tag('cpu_count', cpu_count())

            # Memory policy may have a large effect on types of errors experienced
            overcommit_memory = Path('/proc/sys/vm/overcommit_memory')
            if overcommit_memory.exists():
                policy = {'0': 'heuristic', '1': 'always', '2': 'never'}.get(
                    overcommit_memory.read_text().strip(), 'unknown')
                scope.set_tag('overcommit_memory', policy)
                if policy == 'never':
                    # BUG FIX: this previously re-read overcommit_memory (so
                    # the reported limit was always '2kB'); the kbytes limit
                    # lives in its own proc file.
                    overcommit_kbytes = Path('/proc/sys/vm/overcommit_kbytes')
                    kb = overcommit_kbytes.read_text().strip()
                    if kb != '0':
                        limit = '{}kB'.format(kb)
                    else:
                        # a zero kbytes limit means the ratio-based limit applies
                        overcommit_ratio = Path('/proc/sys/vm/overcommit_ratio')
                        limit = '{}%'.format(overcommit_ratio.read_text().strip())
                    scope.set_tag('overcommit_limit', limit)
                else:
                    scope.set_tag('overcommit_limit', 'n/a')
            else:
                scope.set_tag('overcommit_memory', 'n/a')
                scope.set_tag('overcommit_limit', 'n/a')

            for k, v in vars(opts).items():
                scope.set_tag(k, v)

    # Validate inputs
    if not opts.skip_bids_validation:
        print("Making sure the input data is BIDS compliant (warnings can be ignored in most "
              "cases).")
        validate_input_dir(exec_env, opts.bids_dir, opts.participant_label)

    # FreeSurfer license
    # NOTE(review): Path(os.getenv('FREESURFER_HOME')) raises TypeError when
    # FREESURFER_HOME is unset — presumably the environment guarantees it;
    # confirm before hardening.
    default_license = str(Path(os.getenv('FREESURFER_HOME')) / 'license.txt')
    # Precedence: --fs-license-file, $FS_LICENSE, default_license
    license_file = opts.fs_license_file or os.getenv('FS_LICENSE', default_license)
    if not os.path.exists(license_file):
        raise RuntimeError(
            'ERROR: a valid license file is required for FreeSurfer to run. '
            'FMRIPREP looked for an existing license file at several paths, in this '
            'order: 1) command line argument ``--fs-license-file``; 2) ``$FS_LICENSE`` '
            'environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. '
            'Get it (for free) by registering at https://'
            'surfer.nmr.mgh.harvard.edu/registration.html')
    os.environ['FS_LICENSE'] = license_file

    # Retrieve logging level: each -v lowers the threshold by 5, floor at DEBUG
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    errno = 0

    # Call build_workflow(opts, retval) in a separate process so that any
    # heavy imports/state in workflow construction don't pollute this one
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(opts, retval))
        p.start()
        p.join()

        if p.exitcode != 0:
            sys.exit(p.exitcode)

        fmriprep_wf = retval['workflow']
        plugin_settings = retval['plugin_settings']
        bids_dir = retval['bids_dir']
        output_dir = retval['output_dir']
        work_dir = retval['work_dir']
        subject_list = retval['subject_list']
        run_uuid = retval['run_uuid']
        if not opts.notrack:
            with sentry_sdk.configure_scope() as scope:
                scope.set_tag('run_uuid', run_uuid)
                scope.set_tag('npart', len(subject_list))
        retcode = retval['return_code']

    if fmriprep_wf is None:
        sys.exit(1)

    if opts.write_graph:
        fmriprep_wf.write_graph(graph2use="colored", format='svg', simple_form=True)

    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    # Sentry tracking
    if not opts.notrack:
        sentry_sdk.add_breadcrumb(message='fMRIPrep started', level='info')
        sentry_sdk.capture_message('fMRIPrep started', level='info')

    # Check workflow for missing commands
    missing = check_deps(fmriprep_wf)
    if missing:
        print("Cannot run fMRIPrep. Missing dependencies:")
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface))
        sys.exit(2)

    # Clean up master process before running workflow, which may create forks
    gc.collect()
    try:
        fmriprep_wf.run(**plugin_settings)
    except RuntimeError as e:
        errno = 1
        if "Workflow did not execute cleanly" not in str(e):
            sentry_sdk.capture_exception(e)
        raise
    finally:
        # Generate reports phase — runs even on failure so partial results
        # are still reported
        errno += generate_reports(subject_list, output_dir, work_dir, run_uuid,
                                  sentry_sdk=sentry_sdk)
        write_derivative_description(bids_dir, str(Path(output_dir) / 'fmriprep'))
    if not opts.notrack and errno == 0:
        sentry_sdk.capture_message('fMRIPrep finished without errors', level='info')
    sys.exit(int(errno > 0))
from sql_persistence import session from sql_persistence.interface import SQLPersistenceInterface from eth_manager.contract_registry.ABIs import (erc20_abi, bancor_converter_abi, bancor_network_abi) from eth_manager.eth_transaction_processor import EthTransactionProcessor from eth_manager.transaction_supervisor import TransactionSupervisor from eth_manager.task_manager import TaskManager from eth_manager.blockchain_sync.blockchain_sync import BlockchainSyncer import celery_utils sentry_sdk.init(config.SENTRY_SERVER_DSN, integrations=[CeleryIntegration()]) with configure_scope() as scope: scope.set_tag("domain", config.APP_HOST) chain_config = config.CHAINS[celery_utils.chain] from config import logg logg.info(f'Using chain {celery_utils.chain}') app = Celery('tasks', broker=config.REDIS_URL, backend=config.REDIS_URL, task_serializer='json') app.conf.beat_schedule = { "maintain_eth_balances": {
def build_one(
    version,
    git_branch,
    isdev,
    quick,
    venv,
    build_root,
    group="docs",
    log_directory="/var/log/docsbuild/",
    language=None,
):
    """Check out and build the CPython docs for one (version, language) pair.

    Clones cpython (and, for translations, the matching python-docs-* repo),
    runs the appropriate autobuild make target, and fixes log file group
    ownership afterwards.
    """
    language = language or "en"

    # Tag the current Sentry scope so any error report carries context.
    if sentry_sdk:
        with sentry_sdk.configure_scope() as scope:
            scope.set_tag("version", repr(version))
            scope.set_tag("language", language)

    checkout = os.path.join(
        build_root, str(version), "cpython-{lang}".format(lang=language)
    )
    logging.info("Build start for version: %s, language: %s", str(version), language)

    sphinxopts = SPHINXOPTS[language].copy()
    sphinxopts.append("-q")

    if language != "en":
        # Pull the translation catalogs and point Sphinx at them.
        gettext_language_tag = pep_545_tag_to_gettext_tag(language)
        locale_dirs = os.path.join(build_root, str(version), "locale")
        locale_clone_dir = os.path.join(
            locale_dirs, gettext_language_tag, "LC_MESSAGES"
        )
        locale_repo = "https://github.com/python/python-docs-{}.git".format(language)
        git_clone(
            locale_repo,
            locale_clone_dir,
            translation_branch(locale_repo, locale_clone_dir, version),
        )
        sphinxopts += [
            "-D locale_dirs={}".format(locale_dirs),
            "-D language={}".format(gettext_language_tag),
            "-D gettext_compact=0",
        ]

    git_clone("https://github.com/python/cpython.git", checkout, git_branch)

    maketarget = (
        "autobuild-" + ("dev" if isdev else "stable") + ("-html" if quick else "")
    )
    logging.info("Running make %s", maketarget)

    logname = "cpython-{lang}-{version}.log".format(lang=language, version=version)
    python = os.path.join(venv, "bin/python")
    sphinxbuild = os.path.join(venv, "bin/sphinx-build")
    blurb = os.path.join(venv, "bin/blurb")

    make_cmd = [
        "make",
        "-C",
        os.path.join(checkout, "Doc"),
        "PYTHON=" + python,
        "SPHINXBUILD=" + sphinxbuild,
        "BLURB=" + blurb,
        "VENVDIR=" + venv,
        "SPHINXOPTS=" + " ".join(sphinxopts),
        "SPHINXERRORHANDLING=",
        maketarget,
    ]
    shell_out(make_cmd, logfile=os.path.join(log_directory, logname))

    # Logs are written as the build user; hand them to the docs group.
    shell_out(["chgrp", "-R", group, log_directory])
    logging.info("Build done for version: %s, language: %s", str(version), language)
else: sentry_utils.MAX_STRING_LENGTH = sentry_max_length sentry_sdk.init( sentry_dsn, max_breadcrumbs=50, before_send=before_send, debug=False, environment=sentry_environment, ) configuration = getConfiguration() tags = {} instancehome = configuration.instancehome tags['instance_name'] = instancehome.rsplit(os.path.sep, 1)[-1] with sentry_sdk.configure_scope() as scope: for k, v in tags.items(): scope.set_tag(k, v) if sentry_project: scope.set_tag("project", sentry_project) logging.info("Sentry integration enabled") # fake registration in order to import the file properly # for the sentry_skd.init() call @adapter(IPubFailure) def dummy(event): pass
def main():
    """CLI entry point: build every requested (version, language) pair in a
    process pool, then copy each finished build to the web root, reporting
    failures to the log and (when configured) to Sentry."""
    args = parse_args()

    if args.version:
        version_info()
        exit(0)

    # Normalize user-supplied directories to absolute paths.
    for attr in ("log_directory", "build_root", "www_root"):
        value = getattr(args, attr)
        if value:
            setattr(args, attr, os.path.abspath(value))

    # Log to stderr when interactive, to a rotating file path otherwise.
    if sys.stderr.isatty():
        logging.basicConfig(format="%(levelname)s:%(message)s", stream=sys.stderr)
    else:
        logging.basicConfig(
            format="%(levelname)s:%(asctime)s:%(message)s",
            filename=os.path.join(args.log_directory, "docsbuild.log"),
        )
    logging.root.setLevel(logging.DEBUG)

    venv = os.path.join(args.build_root, "venv")
    branches_to_do = (
        [(args.branch, str(args.branch), args.devel)] if args.branch else BRANCHES
    )
    if not args.languages:
        # Allow "--languages" to build all languages (as if not given)
        # instead of none. "--languages en" builds *no* translation,
        # as "en" is the untranslated one.
        args.languages = LANGUAGES

    with ProcessPoolExecutor(max_workers=args.jobs) as executor:
        futures = [
            (
                version,
                language,
                executor.submit(
                    build_one,
                    version,
                    git_branch,
                    devel,
                    args.quick,
                    venv,
                    args.build_root,
                    args.group,
                    args.log_directory,
                    language,
                ),
            )
            for version, git_branch, devel in branches_to_do
            for language in args.languages
        ]
        wait([entry[2] for entry in futures], return_when=ALL_COMPLETED)

    for version, language, future in futures:
        if sentry_sdk:
            with sentry_sdk.configure_scope() as scope:
                scope.set_tag("version", repr(version))
                scope.set_tag("language", language if language else "en")
        if future.exception():
            logging.error(
                "Exception while building %s version %s: %s",
                language,
                version,
                future.exception(),
            )
            if sentry_sdk:
                sentry_sdk.capture_exception(future.exception())
        # Copy even after a build error: a previous partial build may exist.
        try:
            copy_build_to_webroot(
                args.build_root,
                version,
                language,
                args.group,
                args.quick,
                args.skip_cache_invalidation,
                args.www_root,
            )
        except Exception as ex:
            logging.error(
                "Exception while copying to webroot %s version %s: %s",
                language,
                version,
                ex,
            )
            if sentry_sdk:
                sentry_sdk.capture_exception(ex)
def main():
    """Parse CLI arguments, initialize the Sentry SDK, and send message(s).

    Exactly one message source is required: an mbox file, a maildir
    directory, a message file (``-`` for stdin), or a positional message
    string. Exits 0 on success, 1 when the SDK fails to initialize or no
    event id is returned for a single message.
    """
    logging_choices = ('critical', 'error', 'warning', 'info', 'debug')
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='grouping by sentry uses the first line of the message')
    parser.add_argument('--verbose', action='store_true', default=True)
    parser.add_argument('--logger', default='sentrycat.main')
    parser.add_argument('--level', default='info', choices=logging_choices)
    parser.add_argument('--culprit', default='sentrycat.send_message')
    parser.add_argument('--server_name', default=socket.getfqdn())
    parser.add_argument('--release', default='')
    parser.add_argument('--extra', default={}, action=JsonAction,
                        help='a json dictionary of extra data')
    parser.add_argument('--tags', default={}, action=JsonAction,
                        help='a json dictionary listening tag name and value')
    parser.add_argument('--request', default={}, action=JsonAction,
                        help='a json dictionary of the request')
    parser.add_argument('--dsn', action=EnvDefault, envvar='SENTRY_DSN',
                        required=True,
                        help='specify a sentry dsn, will use env SENTRY_DSN if unset')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--mbox-message', type=exist_file, metavar='FILE',
                       help='mbox filename to parse and send all')
    group.add_argument('--maildir-message', type=exist_dir, metavar='DIR',
                       help='maildir directory to parse and send all')
    group.add_argument('--message',
                       type=argparse.FileType(mode='r', encoding='utf-8'),
                       dest='message_file', metavar='FILE',
                       help='filename to read message from, use "-" for stdin')
    group.add_argument('message', nargs='?',
                       help='the message string to be sent')
    args = parser.parse_args().__dict__

    # BUG FIX: sentry_sdk.init() takes keyword arguments (its first
    # positional parameter is the DSN *string*); passing a raven-style
    # configuration dict as the sole positional argument fails DSN parsing.
    client = sentry_sdk.init(
        dsn=args.pop('dsn'),
        release=args.pop('release'),
        server_name=args.pop('server_name'),
        send_default_pii=True,
        integrations=[
            LoggingIntegration(),
            StdlibIntegration(),
            ExcepthookIntegration(),
            DedupeIntegration(),
            AtexitIntegration(),
            ThreadingIntegration(),
        ],
        # use only the explicitly listed integrations above
        default_integrations=False,
    )
    if not client:
        print('Error: failed to initialize sentry_sdk', file=sys.stderr)
        sys.exit(1)

    # Apply level/extra/tags globally; remaining args describe the message.
    with sentry_sdk.configure_scope() as scope:
        scope.level = args.pop('level')
        for k, v in args.pop('extra').items():
            scope.set_extra(k, v)
        for k, v in args.pop('tags').items():
            scope.set_tag(k, v)

    if args.get('mbox_message'):
        mbox_name = args.pop('mbox_message')
        send_mbox(mbox_name, args)
    elif args.get('maildir_message'):
        mbox_name = args.pop('maildir_message')
        send_maildir(mbox_name, args)
    else:
        if args.get('message_file'):
            msgfile_obj = args.pop('message_file')
            args['message'] = msgfile_obj.read()
        eventid = send_message(args)
        sys.exit(0 if eventid else 1)
def __init__(self):
    """Bootstrap the Sydent server.

    Loads configuration, configures logging (rotating file or stderr),
    opens the SQLite database, determines the server name (guessing the
    hostname and persisting it on first run), optionally enables Sentry
    and Prometheus, then wires up validators, keys, servlets, HTTP
    servers and the background session-cleanup task.
    """
    # config file path can be overridden via the SYDENT_CONF env var
    self.config_file = os.environ.get('SYDENT_CONF', "sydent.conf")
    self.cfg = parse_config(self.config_file)

    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s"
        " - %(message)s"
    )
    formatter = logging.Formatter(log_format)

    # empty log.path means log to stderr instead of a file
    logPath = self.cfg.get('general', "log.path")
    if logPath != '':
        handler = logging.handlers.TimedRotatingFileHandler(
            logPath, when='midnight', backupCount=365
        )
        handler.setFormatter(formatter)

        # rotate the log file on SIGHUP (e.g. from logrotate)
        # NOTE(review): this handler is only defined here — presumably it is
        # registered with signal.signal elsewhere; confirm.
        def sighup(signum, stack):
            logger.info("Closing log file due to SIGHUP")
            handler.doRollover()
            logger.info("Opened new log file due to SIGHUP")
    else:
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)

    # install the chosen handler on the root logger at the configured level
    rootLogger = logging.getLogger('')
    rootLogger.setLevel(self.cfg.get('general', 'log.level'))
    rootLogger.addHandler(handler)

    logger.info("Starting Sydent server")

    self.pidfile = self.cfg.get('general', "pidfile.path");

    # route Twisted's log events into the stdlib logging system
    observer = log.PythonLoggingObserver()
    observer.start()

    self.db = SqliteDatabase(self).db

    self.server_name = self.cfg.get('general', 'server.name')
    if self.server_name == '':
        # no name configured: guess the hostname and persist it so the
        # guess stays stable across restarts
        self.server_name = os.uname()[1]
        logger.warn(("You had not specified a server name. I have guessed that this server is called '%s' "
                     + " and saved this in the config file. If this is incorrect, you should edit server.name in "
                     + "the config file.") % (self.server_name,))
        self.cfg.set('general', 'server.name', self.server_name)
        self.save_config()

    if self.cfg.has_option("general", "sentry_dsn"):
        # Only import and start sentry SDK if configured.
        import sentry_sdk
        sentry_sdk.init(
            dsn=self.cfg.get("general", "sentry_dsn"),
        )
        with sentry_sdk.configure_scope() as scope:
            # tag every event with this server's name
            scope.set_tag("sydent_server_name", self.server_name)

    if self.cfg.has_option("general", "prometheus_port"):
        # metrics endpoint is likewise opt-in via config
        import prometheus_client
        prometheus_client.start_http_server(
            port=self.cfg.getint("general", "prometheus_port"),
            addr=self.cfg.get("general", "prometheus_addr"),
        )

    # 3PID validators (email + msisdn)
    self.validators = Validators()
    self.validators.email = EmailValidator(self)
    self.validators.msisdn = MsisdnValidator(self)

    # signing key used for identity assertions
    self.keyring = Keyring()
    self.keyring.ed25519 = SydentEd25519(self).signing_key
    self.keyring.ed25519.alg = 'ed25519'

    self.sig_verifier = Verifier(self)

    # HTTP API servlets
    self.servlets = Servlets()
    self.servlets.v1 = V1Servlet(self)
    self.servlets.emailRequestCode = EmailRequestCodeServlet(self)
    self.servlets.emailValidate = EmailValidateCodeServlet(self)
    self.servlets.msisdnRequestCode = MsisdnRequestCodeServlet(self)
    self.servlets.msisdnValidate = MsisdnValidateCodeServlet(self)
    self.servlets.lookup = LookupServlet(self)
    self.servlets.bulk_lookup = BulkLookupServlet(self)
    self.servlets.pubkey_ed25519 = Ed25519Servlet(self)
    self.servlets.pubkeyIsValid = PubkeyIsValidServlet(self)
    self.servlets.ephemeralPubkeyIsValid = EphemeralPubkeyIsValidServlet(self)
    self.servlets.threepidBind = ThreePidBindServlet(self)
    self.servlets.threepidUnbind = ThreePidUnbindServlet(self)
    self.servlets.replicationPush = ReplicationPushServlet(self)
    self.servlets.getValidated3pid = GetValidated3pidServlet(self)
    self.servlets.storeInviteServlet = StoreInviteServlet(self)
    self.servlets.blindlySignStuffServlet = BlindlySignStuffServlet(self)

    self.threepidBinder = ThreepidBinder(self)

    # TLS + client/replication HTTP plumbing
    self.sslComponents = SslComponents(self)
    self.clientApiHttpServer = ClientApiHttpServer(self)
    self.replicationHttpsServer = ReplicationHttpsServer(self)
    self.replicationHttpsClient = ReplicationHttpsClient(self)

    self.pusher = Pusher(self)

    # A dedicated validation session store just to clean up old sessions every N minutes
    self.cleanupValSession = ThreePidValSessionStore(self)
    cb = task.LoopingCall(self.cleanupValSession.deleteOldSessions)
    cb.start(10 * 60.0)