def mailgun_send(mailgun_data, files_dict=None):
    """Send an email via the Mailgun HTTP API.

    :param mailgun_data: dict of Mailgun message fields (from/to/subject/...)
    :param files_dict: optional attachments mapping passed through to requests
    :return: HttpResponse mirroring Mailgun's status code, or status 500 when
        the API key is missing or the connection fails
    """
    logger.debug("Mailgun send: %s" % mailgun_data)
    logger.debug("Mailgun files: %s" % files_dict)
    if not settings.MAILGUN_API_KEY:
        capture_message("Mailgun API key is not defined.")
        return HttpResponse(status=500)
    if not settings.MAILGUN_CAUTION_SEND_REAL_MAIL:
        # We will see this message in the mailgun logs but nothing will
        # actually be delivered. This gets added at the end so it can't be
        # overwritten by other functions.
        mailgun_data["o:testmode"] = "yes"
        logger.debug("mailgun_send: o:testmode=%s" % mailgun_data["o:testmode"])
    try:
        resp = requests.post(
            "https://api.mailgun.net/v2/%s/messages" % settings.LIST_DOMAIN,
            auth=("api", settings.MAILGUN_API_KEY),
            data=mailgun_data,
            files=files_dict,
        )
        if resp.status_code != 200:
            capture_message('Mailgun POST returned %d' % resp.status_code)
        return HttpResponse(status=resp.status_code)
    except requests.ConnectionError as e:
        # BUG FIX: use .get() so a payload without a "subject" key cannot
        # raise KeyError here and mask the actual connection failure.
        logger.error('Connection error. Email "%s" aborted.' % mailgun_data.get('subject'))
        capture_exception(e)
        return HttpResponse(status=500)
def post_post_as_tweet(post):
    """ Takes a post and formats it as a tweet, then sends the tweet to Twitter """
    link = URL_STR.format(post.slug)
    if post.comment:
        text = post.comment
        if len(text) > CHAR_LIMIT:
            # Truncate over-long comments and mark the cut with an ellipsis.
            text = text[:CHAR_LIMIT] + "…"
        status = text + "\n\n" + link
    else:
        status = link
    if post.media:
        try:
            # Twitter accepts at most four media attachments per tweet.
            return twitter_api.PostUpdate(status, media=process_media(post.media)[:4])
        except Exception as e:
            # Media upload failed: report it and fall back to a text-only tweet.
            capture_exception(e)
            return twitter_api.PostUpdate(status)
    return twitter_api.PostUpdate(status)
def update_users_feeds(user_ids: List[int]) -> None:
    """
    Updates RSS feeds for a list of Users.

    :param user_ids: List of User Ids
    :return: None
    """
    users = get_users_from_ids(user_ids)
    if not users:
        return
    success_count = 0
    for user in users:
        try:
            # update_user_feed returns (feed, url), but only its side effects
            # are needed here; the previous code collected the urls into a
            # list that was never used.
            update_user_feed(user)
            db.session.add(user)
        except Exception as e:
            capture_exception(e)
            app.logger.exception("Error creating RSS for %s: %s", user, e)
        else:
            success_count += 1
    app.logger.info("Updated RSS feeds for %d Users.", success_count)
def post(self, **kwargs):
    """Base endpoint: accept an uploaded file, optimize it, push the
    optimized variants to the CDN and persist a Media record.

    :return: (media dict, 201) on success, ({"message": ...}, 400) on failure
    """
    file = request.files["file"]
    try:
        media = Media(file=file)
    except InvalidMediaTypeException as e:
        return {
            "message": str(e)
        }, 400
    optimized_file_names = media.optimize()
    for file_name in optimized_file_names:
        # Upload each optimized variant, then delete the local copy.
        upload(file_name)
        os.remove(file_name)
    media.url = CDN_URI.format(optimized_file_names[0])
    media.url_optimized = CDN_URI.format(optimized_file_names[1])
    # BUG FIX: `is` compares object identity, which is unreliable for ints;
    # use == for the numeric comparison (a third file means a video poster).
    if len(optimized_file_names) == 3:
        media.url_poster = CDN_URI.format(optimized_file_names[2])
    media.showcase = request.args.get("showcase") is not None
    try:
        db.session.add(media)
        db.session.commit()
    except Exception as e:
        capture_exception(e)
        return {
            "message": "An unexpected error occoured while writing to the database"
        }, 400
    return media.to_dict(), 201
def get_contributors():
    """Return the top ten Weblate contributors, cached for one hour."""
    cache_key = 'wlweb-contributors'
    cached = cache.get(cache_key)
    if cached is not None:
        return cached

    # Perform request
    try:
        response = requests.get(WEBLATE_CONTRIBUTORS_URL)
    except IOError as error:
        sentry_sdk.capture_exception(error)
        response = None

    # Stats are not yet calculated
    if response is None or response.status_code != 200:
        return []

    stats = response.json()

    # Fill in ranking. This seems to best reflect people effort, but still
    # is not accurate at all. The problem is that commits stats are
    # misleading due to high number of commits generated by old Weblate
    # versions. Additions are heavily biased by adding new translation files.
    for entry in stats:
        if entry['author']['login'] in EXCLUDE_USERS:
            entry['rank'] = 0
            continue
        churn = sum(week['a'] + week['d'] for week in entry['weeks'])
        entry['rank'] = 8 * entry['total'] + churn

    # Highest rank first (stable for ties, same as sorting by -rank).
    stats.sort(key=lambda entry: entry['rank'], reverse=True)

    top_ten = stats[:10]
    cache.set(cache_key, top_ten, timeout=3600)
    return top_ten
async def on_command_error(self, ctx, error):
    """Whenever a command fails"""
    log.error(error)
    traceback.print_exc()
    sentry_sdk.capture_exception(error)
    # Track failures per command, using underscores for subcommand paths.
    command_key = ctx.command.qualified_name.replace(' ', '_')
    self.commands_failed[command_key] += 1
def published(self, item: Dict) -> datetime:
    """
    Gets a published datetime for an item.
    Try published value. Default to utcnow.
    :param item: deserialized JSON item
    :type item: Dict
    :return: datetime
    """
    published = None
    date_published = item.get("date_published", "")
    if date_published:
        try:
            published = self.get_datetime_and_remove_timezone(date_published)
        except Exception as e:
            capture_exception(e)
            app.logger.exception(
                "Exception getting published date from date_published "
                "for item %s from %s: %s",
                self.item_id,
                self.feed,
                e,
            )
    # Fall back to "now" when the item carried no usable date.
    return published if published else datetime.utcnow()
def log_exception():
    """
    Log an exception that has been raised to the console and Sentry.
    Must be called within an `except ...:` statement.
    """
    traceback.print_exc()
    if not settings.SENTRY_DSN:
        # Sentry not configured; console output is all we can do.
        return
    capture_exception()
def report_exception(exc=None):
    """
    Forward an exception to the error tracking service (Sentry).

    When ``exc`` is :obj:`None` the SDK reports the exception currently
    being handled (taken from ``sys.exc_info()``).

    :arg exc: the exception to report
    :type exc: :class:`Exception`, :obj:`None`, or a :func:`sys.exc_info` tuple
    """
    sentry_sdk.capture_exception(exc)
def load_extensions(self, cogs=None, path='cogs.'):
    """Load cogs into the bot.

    :param cogs: optional iterable of extension names; when falsy, every
        ``*.py`` file in the ``cogs`` directory is loaded
    :param path: dotted module prefix prepended to each extension name
    """
    # BUG FIX: strip only the trailing ".py"; str.replace() would also
    # remove a ".py" occurring elsewhere in the file name.
    base_extensions = [x[:-3] for x in os.listdir('cogs') if x.endswith('.py')]
    for extension in cogs or base_extensions:
        try:
            self.load_extension(f'{path}{extension}')
            log.info(f'Loaded extension {extension}')
        except Exception as e:
            # A broken cog should not stop the remaining cogs from loading.
            log.info(f'Unable to load extension {extension}. Err: {e}')
            traceback.print_exc()
            sentry_sdk.capture_exception(e)
def save_feed_urls_to_session(feed_info_list: List[Dict]) -> List[str]:
    """
    Save List of Urls of StatusFeedInfo List to the current Session

    :param feed_info_list: List of JSON dicts
    :return: List of Urls as str; empty list on failure
    """
    try:
        # List comprehension instead of list(generator): same result,
        # clearer and slightly faster (flake8-comprehensions C400).
        url_list = [f.get("url") for f in feed_info_list]
        session[SESSION_FEED_URLS] = url_list
        return url_list
    except Exception as e:
        capture_exception(e)
        session[SESSION_FEED_URLS] = None
        app.logger.exception("Failed to load Url list to Session. Exception: %s", e)
        return []
def deserialize_feed_info(json_feed_info: List[Dict]) -> List[StatusFeedInfo]:
    """
    Deserialize a List of JSON valid Dicts into a List of StatusFeedInfo objects

    :param json_feed_info: List of JSON Dicts
    :return: List of StatusFeedInfo; empty list when validation fails
    """
    try:
        return FEED_INFO_SCHEMA.load(json_feed_info, partial=True)
    except ValidationError as err:
        capture_exception(err)
        app.logger.exception(
            "Failed to load StatusFeedInfo from JSON: %s, Exception: %s",
            json_feed_info,
            err,
        )
        return []
def serialise_feed_info(feed_info_list: List[StatusFeedInfo]) -> Dict:
    """
    Serialize a List of StatusFeedInfo objects to a JSON valid Dictionary

    :param feed_info_list: List of StatusFeedInfo
    :return: JSON Dict; empty dict when validation fails
    """
    try:
        return FEED_INFO_SCHEMA.dump(feed_info_list)
    except ValidationError as err:
        capture_exception(err)
        app.logger.exception(
            "Failed to dump StatusFeedInfo list to JSON: %s, Exception: %s",
            feed_info_list,
            err,
        )
        return {}
def save_feed_info_to_session(dump: List[Dict]) -> bool:
    """
    Save JSON StatusFeedInfo List to the current Session

    :param dump: List of JSON Dicts
    :return: True when the dump was stored, False otherwise
    """
    if not dump:
        return False
    try:
        session[SESSION_FEED_INFO] = dump
    except Exception as err:
        capture_exception(err)
        session[SESSION_FEED_INFO] = None
        app.logger.exception(
            "Failed to load StatusFeedInfo dump to Session: %s, Exception: %s",
            dump,
            err,
        )
        return False
    return True
def captureException(self, exc_info=None, **kwargs):
    """Report an exception to Sentry, applying ``kwargs`` to a scoped
    context so they don't leak onto subsequent events.

    :param exc_info: exception (or exc_info tuple) to report; None means
        the currently handled exception
    :return: the Sentry event id from capture_exception
    """
    with sentry_sdk.push_scope() as scope:
        # Translate legacy keyword arguments onto the scope.
        self._kwargs_into_scope(scope, **kwargs)
        return capture_exception(exc_info)
def post(self, request):
    """Sign-in endpoint.

    Expects ``medium`` ("email" or "mobile"), ``password`` and the matching
    identifier field in ``request.data``.  On success returns a JWT
    access/refresh token pair plus the serialized user; on any failure
    returns a 400/403 response with a deliberately generic error message.
    """
    try:
        medium = request.data.get("medium", False)
        password = request.data.get("password", False)
        ## Raise exception if any of the above are missing
        if not medium or not password:
            capture_message("Sign in endpoint missing medium data")
            return Response(
                {
                    "error": "Something went wrong. Please try again later or contact the support team."
                },
                status=status.HTTP_400_BAD_REQUEST,
            )
        if medium == "email":
            # Validate presence and format of the email before the lookup.
            if not request.data.get(
                "email", False
            ) or not check_valid_email_address(
                request.data.get("email").strip().lower()
            ):
                return Response(
                    {"error": "Please provide a valid email address."},
                    status=status.HTTP_400_BAD_REQUEST,
                )
            email = request.data.get("email").strip().lower()
            # NOTE(review): this None check looks unreachable — the guard
            # above already rejected falsy values. Kept as-is.
            if email is None:
                return Response(
                    {"error": "Please provide a valid email address."},
                    status=status.HTTP_400_BAD_REQUEST,
                )
            # Raises User.DoesNotExist (handled below) when no match exists.
            user = User.objects.get(email=email)
        elif medium == "mobile":
            # Validate presence and format of the mobile number.
            if not request.data.get(
                "mobile", False
            ) or not check_valid_phone_number(
                request.data.get("mobile").strip().lower()
            ):
                return Response(
                    {"error": "Please provide a valid mobile number."},
                    status=status.HTTP_400_BAD_REQUEST,
                )
            mobile_number = request.data.get("mobile").strip().lower()
            # NOTE(review): unreachable for the same reason as the email
            # branch above.
            if mobile_number is None:
                return Response(
                    {"error": "Please provide a valid mobile number"},
                    status=status.HTTP_400_BAD_REQUEST,
                )
            user = User.objects.get(
                mobile_number=mobile_number
            )
        else:
            # Unknown medium value supplied by the client.
            capture_message("Sign in endpoint wrong medium data")
            return Response(
                {
                    "error": "Something went wrong. Please try again later or contact the support team."
                },
                status=status.HTTP_400_BAD_REQUEST,
            )
        # send if it finds two? to logger
        if user is None:
            return Response(
                {
                    "error": "Sorry, we could not find a user with the provided credentials. Please try again."
                },
                status=status.HTTP_400_BAD_REQUEST,
            )
        if not user.check_password(password):
            # Same message as "user not found" so the endpoint does not
            # reveal whether the account exists.
            return Response(
                {
                    "error": "Sorry, we could not find a user with the provided credentials. Please try again."
                },
                status=status.HTTP_403_FORBIDDEN,
            )
        if not user.is_active:
            return Response(
                {
                    "error": "Your account has been deactivated. Please contact your site administrator."
                },
                status=status.HTTP_403_FORBIDDEN,
            )
        serialized_user = UserSerializer(user).data
        # settings last active for the user
        user.last_active = timezone.now()
        user.last_login_time = timezone.now()
        user.last_login_ip = request.META.get("REMOTE_ADDR")
        user.last_login_medium = medium
        user.last_login_uagent = request.META.get("HTTP_USER_AGENT")
        user.token_updated_at = timezone.now()
        user.save()
        access_token, refresh_token = get_tokens_for_user(user)
        data = {
            "access_token": access_token,
            "refresh_token": refresh_token,
            "user": serialized_user,
        }
        return Response(data, status=status.HTTP_200_OK)
    except User.DoesNotExist:
        return Response(
            {
                "error": "Sorry, we could not find a user with the provided credentials. Please try again."
            },
            status=status.HTTP_400_BAD_REQUEST,
        )
    except Exception as e:
        # Unexpected failure: report to Sentry, answer with a generic error.
        capture_exception(e)
        return Response(
            {
                "error": "Something went wrong. Please try again later or contact the support team."
            },
            status=status.HTTP_400_BAD_REQUEST,
        )
def create(**kwargs):
    """
    Creates a new certificate.

    Mints the certificate via the authority plugin, persists either a
    Certificate (synchronous issuance) or a PendingCertificate (ACME),
    emits metrics/signals, and returns the created object.
    """
    try:
        # Mint the certificate with the configured authority plugin.
        cert_body, private_key, cert_chain, external_id, csr = mint(**kwargs)
    except Exception:
        log_data = {
            "message": "Exception minting certificate",
            "issuer": kwargs["authority"].name,
            "cn": kwargs["common_name"],
        }
        current_app.logger.error(log_data, exc_info=True)
        capture_exception()
        raise
    kwargs["body"] = cert_body
    kwargs["private_key"] = private_key
    kwargs["chain"] = cert_chain
    kwargs["external_id"] = external_id
    kwargs["csr"] = csr

    roles = create_certificate_roles(**kwargs)

    if kwargs.get("roles"):
        kwargs["roles"] += roles
    else:
        kwargs["roles"] = roles

    # A present body means the authority issued synchronously; otherwise
    # (e.g. ACME) track a pending certificate until it resolves.
    if cert_body:
        cert = Certificate(**kwargs)
        kwargs["creator"].certificates.append(cert)
    else:
        # ACME path
        cert = PendingCertificate(**kwargs)
        kwargs["creator"].pending_certificates.append(cert)

    cert.authority = kwargs["authority"]

    database.commit()

    if isinstance(cert, Certificate):
        # Only fully-issued certificates trigger the issued signal/metrics.
        certificate_issued.send(certificate=cert, authority=cert.authority)
        metrics.send(
            "certificate_issued",
            "counter",
            1,
            metric_tags=dict(owner=cert.owner, issuer=cert.issuer),
        )
        log_data = {
            "function": "lemur.certificates.service.create",
            "owner": cert.owner,
            "name": cert.name,
            "serial": cert.serial,
            "issuer": cert.issuer,
            "not_after": cert.not_after.format('YYYY-MM-DD HH:mm:ss'),
            "not_before": cert.not_before.format('YYYY-MM-DD HH:mm:ss'),
            "sans": str(', '.join([domain.name for domain in cert.domains])),
        }
        current_app.logger.info(log_data)

    if isinstance(cert, PendingCertificate):
        # We need to refresh the pending certificate to avoid "Instance is not bound to a Session; "
        # "attribute refresh operation cannot proceed"
        pending_cert = database.session_query(PendingCertificate).get(cert.id)
        # Local import to avoid a circular dependency with celery tasks.
        from lemur.common.celery import fetch_acme_cert
        if not current_app.config.get("ACME_DISABLE_AUTORESOLVE", False):
            # Poll the ACME provider shortly after creation.
            fetch_acme_cert.apply_async(
                (pending_cert.id, kwargs.get("async_reissue_notification_cert_id", None)),
                countdown=5)

    return cert
def _dispatch_call_inner(
    self,
    fn_name: str,
    should_compare: Union[bool, Callable[[Any], bool]],
    rollup: Optional[int],
    organization: Optional[Organization],
    schema: Optional[Schema],
    *args: Any,
) -> ReleaseHealthResult:
    """Run ``fn_name`` against the sessions backend and, when the feature
    is enabled, shadow-run it against the metrics backend and compare.

    The sessions result is always what gets returned; the metrics call only
    produces telemetry (timers, counters, and a Sentry message on mismatch).
    A crash anywhere in the comparison never propagates to the caller.
    """
    if rollup is None:
        rollup = 0  # force exact date comparison if not specified
    sessions_fn = getattr(self.sessions, fn_name)
    set_tag("releasehealth.duplex.rollup", str(rollup))
    set_tag("releasehealth.duplex.method", fn_name)
    set_tag("releasehealth.duplex.org_id", str(getattr(organization, "id")))
    set_extra("function_args", args)  # Make sure we always know all function args
    tags = {"method": fn_name, "rollup": str(rollup)}
    with timer("releasehealth.sessions.duration", tags=tags, sample_rate=1.0):
        ret_val = sessions_fn(*args)
    if organization is None or not features.has(
            "organizations:release-health-check-metrics", organization):
        return ret_val  # cannot check feature without organization
    set_context(
        "release-health-duplex-sessions",
        {
            "sessions": ret_val,
        },
    )
    try:
        # We read from the metrics source even if there is no need to compare.
        metrics_fn = getattr(self.metrics, fn_name)
        with timer("releasehealth.metrics.duration", tags=tags, sample_rate=1.0):
            metrics_val = metrics_fn(*args)

        if not isinstance(should_compare, bool):
            # should compare depends on the session result
            # evaluate it now
            should_compare = should_compare(ret_val)

        incr(
            "releasehealth.metrics.check_should_compare",
            tags={
                "should_compare": str(should_compare),
                **tags
            },
            sample_rate=1.0,
        )

        if not should_compare:
            return ret_val

        # Compare against a copy so compare_results cannot mutate the
        # value we hand back to the caller.
        copy = deepcopy(ret_val)

        set_context("release-health-duplex-metrics", {"metrics": metrics_val})
        with timer("releasehealth.results-diff.duration", tags=tags, sample_rate=1.0):
            errors = compare_results(copy, metrics_val, rollup, None, schema)
        set_context("release-health-duplex-errors",
                    {"errors": [str(error) for error in errors]})

        should_report = features.has(
            "organizations:release-health-check-metrics-report", organization)

        incr(
            "releasehealth.metrics.compare",
            tags={
                "has_errors": str(bool(errors)),
                "reported": str(should_report),
                **tags
            },
            sample_rate=1.0,
        )

        if errors and should_report:
            tag_delta(errors, tags)
            # We heavily rely on Sentry's message sanitization to properly deduplicate this
            capture_message(
                f"{fn_name} - Release health metrics mismatch: {errors[0]}"
            )
    except Exception:
        # The shadow comparison must never break the caller's request.
        capture_exception()
        should_compare = False
        incr(
            "releasehealth.metrics.crashed",
            tags=tags,
            sample_rate=1.0,
        )

    return ret_val
def do_this():
    """Exercise Sentry: leave a breadcrumb, then raise and capture an error."""
    add_breadcrumb(message="Hello", hint={"foo": 42})
    try:
        raise ValueError("aha!")
    except Exception:
        # The breadcrumb above should appear on the captured event.
        capture_exception()
def send_failure_alerts(
    self,
    is_warning: bool,
    print_paused: bool,
    printer: Printer,
    print_: Print,
    img_url: str,
    extra_context: Optional[Dict] = None,
    plugin_names: Tuple[str, ...] = (),
    fail_silently: bool = True,
) -> None:
    """Fan a print-failure alert out to mobile push and to every matching
    notification plugin the printer's owner has enabled.

    :param is_warning: True for a warning-level alert
    :param print_paused: whether printing was paused by the detector
    :param img_url: snapshot image url included with the alert
    :param extra_context: optional extra data passed to plugins
    :param plugin_names: restrict delivery to these plugins (empty = all)
    :param fail_silently: when True, plugin errors are logged and captured
        instead of being re-raised
    """
    try:
        # Mobile push goes out unconditionally, independent of plugins.
        mobile_notifications.send_failure_alert(printer, img_url, is_warning, print_paused)
    except Exception:
        capture_exception()

    if plugin_names:
        names = list(set(self.notification_plugin_names()) & set(plugin_names))
    else:
        names = self.notification_plugin_names()

    # select matching, enabled & configured
    nsettings = list(NotificationSetting.objects.filter(
        user_id=printer.user_id,
        enabled=True,
        name__in=names,
        notify_on_failure_alert=True
    ))

    if not nsettings:
        LOGGER.debug("no matching NotificationSetting objects, ignoring failure alert")
        return

    # Build the shared contexts once; they are reused for every plugin.
    user_ctx = self.get_user_context(printer.user)
    printer_ctx = self.get_printer_context(printer)
    print_ctx = self.get_print_context(print_)

    for nsetting in nsettings:
        LOGGER.debug(f'forwarding failure alert to plugin "{nsetting.name}" (pk: {nsetting.pk})')
        try:
            plugin = self.notification_plugin_by_name(nsetting.name)
            if not plugin:
                continue

            context = FailureAlertContext(
                config=nsetting.config,
                user=user_ctx,
                printer=printer_ctx,
                print=print_ctx,
                is_warning=is_warning,
                print_paused=print_paused,
                extra_context=extra_context,
                img_url=img_url,
            )

            self._send_failure_alert(nsetting=nsetting, context=context)
        except NotImplementedError:
            # Plugin does not support failure alerts; skip it quietly.
            pass
        except Exception:
            if fail_silently:
                # One broken plugin must not stop delivery to the rest.
                LOGGER.exception('send_failure_alert plugin error')
                capture_exception()
            else:
                raise
def resfinder_redmine(redmine_instance, issue, work_dir, description): sentry_sdk.init(SENTRY_DSN, before_send=before_send) # Unpickle Redmine objects redmine_instance = pickle.load(open(redmine_instance, 'rb')) issue = pickle.load(open(issue, 'rb')) description = pickle.load(open(description, 'rb')) try: # Parse description to figure out what SEQIDs we need to run on. seqids = list() for item in description: item = item.upper() seqids.append(item) retrieve_nas_files(seqids=seqids, outdir=work_dir, filetype='fasta', copyflag=False) missing_fastas = verify_fasta_files_present(seqids, work_dir) if missing_fastas: redmine_instance.issue.update(resource_id=issue.id, notes='WARNING: Could not find the following requested SEQIDs on' ' the OLC NAS: {}'.format(missing_fastas)) # Use the COWBAT_DATABASES variable as the database path db_path = COWBAT_DATABASES # Run ResFindr cmd = 'GeneSeekr blastn -s {seqfolder} -t {targetfolder} -r {reportdir} -A'\ .format(seqfolder=work_dir, targetfolder=os.path.join(db_path, 'resfinder'), reportdir=os.path.join(work_dir, 'reports')) # Update the issue with the ResFinder command redmine_instance.issue.update(resource_id=issue.id, notes='ResFinder command:\n {cmd}'.format(cmd=cmd)) os.system(cmd) # These unfortunate hard coded paths appear to be necessary activate = 'source /home/ubuntu/miniconda3/bin/activate /mnt/nas2/virtual_environments/cowbat' # Run sipprverse with the necessary arguments mob_cmd = 'python -m spadespipeline.mobrecon -s {seqfolder} -r {targetfolder}' \ .format(seqfolder=work_dir, targetfolder=os.path.join(db_path, 'mobrecon')) # Update the issue with the MOB Recon command redmine_instance.issue.update(resource_id=issue.id, notes='MOB Recon command:\n {cmd}'.format(cmd=mob_cmd)) # Create another shell script to execute within the PlasmidExtractor conda environment template = "#!/bin/bash\n{} && {}".format(activate, mob_cmd) mob_script = os.path.join(work_dir, 'run_mob_recon.sh') with open(mob_script, 'w+') as file: 
file.write(template) # Modify the permissions of the script to allow it to be run on the node make_executable(mob_script) # Run shell script os.system(mob_script) # Get the output file uploaded. output_list = list() output_dict = dict() # Add the three reports separately to the output list output_dict['path'] = os.path.join(work_dir, 'reports', 'resfinder_blastn.xlsx') output_dict['filename'] = 'resfinder_blastn.xlsx' output_list.append(output_dict) output_dict = dict() output_dict['path'] = os.path.join(work_dir, 'reports', 'mob_recon_summary.csv') output_dict['filename'] = 'mob_recon_summary.csv' output_list.append(output_dict) output_dict = dict() output_dict['path'] = os.path.join(work_dir, 'reports', 'amr_summary.csv') output_dict['filename'] = 'amr_summary.csv' output_list.append(output_dict) redmine_instance.issue.update(resource_id=issue.id, uploads=output_list, status_id=4, notes='resfinder process complete!') # Clean up all FASTA/FASTQ files so we don't take up too much space on the NAS os.system('rm {workdir}/*fasta'.format(workdir=work_dir)) try: # Remove all other folders for dirpath, dirnames, filenames in os.walk(work_dir): for dirname in dirnames: shutil.rmtree(os.path.join(dirpath, dirname)) except IOError: pass except Exception as e: sentry_sdk.capture_exception(e) redmine_instance.issue.update(resource_id=issue.id, notes='Something went wrong! We log this automatically and will look into the ' 'problem and get back to you with a fix soon.')
gzip_s3_path = f'{s3_path}.gz' # s3_utils.upload_to_s3(local_file_name, s3_path) s3_utils.upload_to_s3(gzip_file_name, gzip_s3_path) logging.info(f'Deleting local file: {local_file_name}') os.remove(local_file_name) os.remove(gzip_file_name) db_utils.drop_archive_table(archive_db_name, archive_table_name) return None def compress_to_gzip(local_file_name): gzip_file_name = f'{local_file_name}.gz' fp = open(local_file_name, 'rb') with gzip.open(gzip_file_name, 'wb') as gz_fp: gz_fp.write(bytearray(fp.read())) return gzip_file_name if __name__ == '__main__': sentry_sdk.init(dsn=sentry_dsn) try: start_archival() except Exception as e: sentry_sdk.capture_exception(e) raise e
def custom_exception_handler(exc, context):
    """DRF exception handler that normalizes every error response.

    Produces a body with ``timestamp``, ``error_code`` and ``errors``
    (always in ``non_field_errors`` form), reports unhandled exceptions
    (500s) to Sentry with the requesting user attached, and logs all
    exceptions.
    """
    # First the get response by django rest framework
    response = exception_handler(exc, context)

    # For 500 errors, we create new response
    if not response:
        request = context.get('request')
        if request and request.user and request.user.id:
            # Attach the requesting user to the Sentry event.
            with sentry_sdk.configure_scope() as scope:
                scope.user = {
                    'id': request.user.id,
                    'email': request.user.email,
                }
                scope.set_extra('is_superuser', request.user.is_superuser)
        sentry_sdk.capture_exception()
        response = Response({}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    # Empty the response body but keep the headers
    response.data = {}

    # Timestamp of exception
    response.data['timestamp'] = timezone.now()

    if isinstance(exc, (exceptions.NotAuthenticated, )):
        response.status_code = status.HTTP_401_UNAUTHORIZED
    elif hasattr(exc, 'status_code'):
        response.status_code = exc.status_code

    if hasattr(exc, 'code'):
        # If the raised exception defines a code, send it as
        # internal error code
        response.data['error_code'] = exc.code
    elif hasattr(exc, 'get_codes'):
        # Otherwise, try to map the exception.get_codes() value to an
        # internal error code.
        # If no internal code available, return http status code as
        # internal error code by default.
        response.data['error_code'] = map_error_codes(exc.get_codes(), response.status_code)
    else:
        response.data['error_code'] = response.status_code

    # Error message can be defined by the exception as message
    # or detail attributres
    # Otherwise, it is simply the stringified exception.
    errors = None
    user_error = None

    if hasattr(exc, 'message'):
        errors = exc.message
    elif hasattr(exc, 'detail'):
        if type(exc.detail) is list:
            errors = [str(error) for error in exc.detail]
        else:
            errors = exc.detail
    elif hasattr(exc, 'default_detail'):
        errors = exc.default_detail
    elif response.status_code == 404:
        errors = 'Resource not found'
    else:
        # Unknown exception type: show a generic message to the user.
        errors = str(exc)
        user_error = standard_error_string

    if hasattr(exc, 'user_message'):
        user_error = exc.user_message

    # Wrap up string error inside non-field-errors
    if isinstance(errors, str):
        errors = {
            'non_field_errors': [errors],
        }
    elif isinstance(errors, list) and all(
            [isinstance(error, str) for error in errors]):
        errors = {
            'non_field_errors': errors,
        }

    if user_error:
        # Keep the raw message for internal consumers; surface the
        # user-facing message in non_field_errors.
        errors['internal_non_field_errors'] = errors.get('non_field_errors')
        errors['non_field_errors'] = [user_error]

    response.data['errors'] = errors

    # If there is a link available for the exception,
    # send back the link as well.
    if hasattr(exc, 'link'):
        response.data['link'] = exc.link

    # Logging
    if any([isinstance(exc, exception) for exception in WARN_EXCEPTIONS]):
        logger.warning('API Exception Warning!!', exc_info=True)
    else:
        logger.error(
            '{}.{}'.format(type(exc).__module__, type(exc).__name__),
            exc_info=True,
            extra={'request': context.get('request')},
        )

    return response
Tag, Agent, AgentScript, ScopeLog, UserInvitation, ) from app.instrumentation import initialize_sentryio from sentry_sdk import capture_exception from config import Config config = Config() initialize_sentryio(config) try: app = create_app(config_class=config, load_config=True) except Exception as e: capture_exception(e) raise e @app.shell_context_processor def make_shell_context(): return { "db": db, "User": User, "ScopeItem": ScopeItem, "ConfigItem": ConfigItem, "NatlasServices": NatlasServices, "AgentConfig": AgentConfig, "RescanTask": RescanTask, "Tag": Tag, "Agent": Agent,
def handle_exception(e):
    """Report the currently-handled exception to Sentry.

    NOTE(review): the handler argument ``e`` is intentionally unused —
    ``capture_exception()`` with no argument pulls the active exception
    from ``sys.exc_info()``.
    """
    sentry_sdk.capture_exception()
def get(self, request, organization):
    """Discover (events-v2) query endpoint for an organization.

    Gated on the ``discover-basic`` feature.  Translates search and Snuba
    errors into DRF ``ParseError`` responses, reporting only genuine
    server-side query failures to Sentry.
    """
    if not features.has("organizations:discover-basic", organization, actor=request.user):
        return Response(status=404)

    with sentry_sdk.start_span(op="discover.endpoint", description="filter_params") as span:
        span.set_tag("organization", organization)
        try:
            params = self.get_filter_params(request, organization)
        except NoProjects:
            # No queryable projects: an empty result set, not an error.
            return Response([])
        params["organization_id"] = organization.id

        has_global_views = features.has(
            "organizations:global-views", organization, actor=request.user
        )
        if not has_global_views and len(params.get("project_id", [])) > 1:
            raise ParseError(detail="You cannot view events from multiple projects.")

    def data_fn(offset, limit):
        # Invoked by the paginator once per requested page.
        return discover.query(
            selected_columns=request.GET.getlist("field")[:],
            query=request.GET.get("query"),
            params=params,
            reference_event=self.reference_event(
                request, organization, params.get("start"), params.get("end")
            ),
            orderby=self.get_orderby(request),
            offset=offset,
            limit=limit,
            referrer=request.GET.get("referrer", "api.organization-events-v2"),
            auto_fields=True,
            use_aggregate_conditions=True,
        )

    try:
        return self.paginate(
            request=request,
            paginator=GenericOffsetPaginator(data_fn=data_fn),
            on_results=lambda results: self.handle_results_with_meta(
                request, organization, params["project_id"], results
            ),
        )
    except (discover.InvalidSearchQuery, snuba.QueryOutsideRetentionError) as error:
        raise ParseError(detail=six.text_type(error))
    except snuba.QueryIllegalTypeOfArgument:
        raise ParseError(detail="Invalid query. Argument to function is wrong type.")
    except snuba.SnubaError as error:
        message = "Internal error. Please try again."
        if isinstance(
            error,
            (
                snuba.RateLimitExceeded,
                snuba.QueryMemoryLimitExceeded,
                snuba.QueryTooManySimultaneous,
            ),
        ):
            # Resource-limit errors are the user's to work around.
            message = "Query timeout. Please try again. If the problem persists try a smaller date range or fewer projects."
        elif isinstance(
            error,
            (
                snuba.UnqualifiedQueryError,
                snuba.QueryExecutionError,
                snuba.SchemaValidationError,
            ),
        ):
            # Genuine server-side failures get reported to Sentry.
            sentry_sdk.capture_exception(error)
            message = "Internal error. Your query failed to run."
        raise ParseError(detail=message)
def main():
    """Entry point: parse CLI options, configure crash reporting, build the
    fMRIPrep workflow in a subprocess, run it, and generate reports."""
    from nipype import logging as nlogging
    from multiprocessing import set_start_method, Process, Manager
    from ..viz.reports import generate_reports
    from ..utils.bids import write_derivative_description
    set_start_method('forkserver')
    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args()

    exec_env = os.name

    # special variable set in the container
    if os.getenv('IS_DOCKER_8395080871'):
        exec_env = 'singularity'
        cgroup = Path('/proc/1/cgroup')
        if cgroup.exists() and 'docker' in cgroup.read_text():
            exec_env = 'docker'
            if os.getenv('DOCKER_VERSION_8395080871'):
                exec_env = 'fmriprep-docker'

    sentry_sdk = None
    if not opts.notrack:
        # Configure Sentry crash reporting unless the user opted out.
        import sentry_sdk
        from ..__about__ import __version__
        environment = "prod"
        release = __version__
        if not __version__:
            environment = "dev"
            release = "dev"
        elif bool(int(os.getenv('FMRIPREP_DEV', 0))) or ('+' in __version__):
            environment = "dev"

        def before_send(event, hints):
            # Filtering log messages about crashed nodes
            if 'logentry' in event and 'message' in event['logentry']:
                msg = event['logentry']['message']
                if msg.startswith("could not run node:"):
                    return None
                elif msg.startswith("Saving crash info to "):
                    return None
                elif re.match("Node .+ failed to run on host .+", msg):
                    return None
            if 'breadcrumbs' in event and isinstance(event['breadcrumbs'], list):
                fingerprints_to_propagate = ['no-disk-space', 'memory-error',
                                             'permission-denied', 'keyboard-interrupt']
                for bc in event['breadcrumbs']:
                    msg = bc.get('message', 'empty-msg')
                    if msg in fingerprints_to_propagate:
                        event['fingerprint'] = [msg]
                        break
            return event

        sentry_sdk.init("https://[email protected]/1137693",
                        release=release,
                        environment=environment,
                        before_send=before_send)
        with sentry_sdk.configure_scope() as scope:
            scope.set_tag('exec_env', exec_env)
            if exec_env == 'fmriprep-docker':
                scope.set_tag('docker_version', os.getenv('DOCKER_VERSION_8395080871'))
            dset_desc_path = opts.bids_dir / 'dataset_description.json'
            if dset_desc_path.exists():
                desc_content = dset_desc_path.read_bytes()
                scope.set_tag('dset_desc_sha256', hashlib.sha256(desc_content).hexdigest())
            free_mem_at_start = round(psutil.virtual_memory().free / 1024**3, 1)
            scope.set_tag('free_mem_at_start', free_mem_at_start)
            scope.set_tag('cpu_count', cpu_count())

            # Memory policy may have a large effect on types of errors experienced
            overcommit_memory = Path('/proc/sys/vm/overcommit_memory')
            if overcommit_memory.exists():
                policy = {'0': 'heuristic',
                          '1': 'always',
                          '2': 'never'}.get(overcommit_memory.read_text().strip(), 'unknown')
                scope.set_tag('overcommit_memory', policy)
                if policy == 'never':
                    # BUG FIX: this previously re-read overcommit_memory, so the
                    # reported limit was always "2kB"; the kB limit actually
                    # lives in the overcommit_kbytes sysctl.
                    overcommit_kbytes = Path('/proc/sys/vm/overcommit_kbytes')
                    kb = overcommit_kbytes.read_text().strip()
                    if kb != '0':
                        limit = '{}kB'.format(kb)
                    else:
                        # kbytes == 0 means the ratio sysctl is in effect instead.
                        overcommit_ratio = Path('/proc/sys/vm/overcommit_ratio')
                        limit = '{}%'.format(overcommit_ratio.read_text().strip())
                    scope.set_tag('overcommit_limit', limit)
                else:
                    scope.set_tag('overcommit_limit', 'n/a')
            else:
                scope.set_tag('overcommit_memory', 'n/a')
                scope.set_tag('overcommit_limit', 'n/a')

            for k, v in vars(opts).items():
                scope.set_tag(k, v)

    # Validate inputs
    if not opts.skip_bids_validation:
        print("Making sure the input data is BIDS compliant (warnings can be ignored in most "
              "cases).")
        validate_input_dir(exec_env, str(opts.bids_dir), opts.participant_label)

    # FreeSurfer license
    default_license = str(Path(os.getenv('FREESURFER_HOME')) / 'license.txt')
    # Precedence: --fs-license-file, $FS_LICENSE, default_license
    license_file = opts.fs_license_file or os.getenv('FS_LICENSE', default_license)
    if not os.path.exists(license_file):
        raise RuntimeError(
            'ERROR: a valid license file is required for FreeSurfer to run. '
            'FMRIPREP looked for an existing license file at several paths, in this '
            'order: 1) command line argument ``--fs-license-file``; 2) ``$FS_LICENSE`` '
            'environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. '
            'Get it (for free) by registering at https://'
            'surfer.nmr.mgh.harvard.edu/registration.html')
    os.environ['FS_LICENSE'] = license_file

    # Retrieve logging level
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    errno = 0

    # Call build_workflow(opts, retval) in a subprocess so nipype's heavy
    # imports do not pollute this process before the workflow runs.
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(opts, retval))
        p.start()
        p.join()

        retcode = p.exitcode or retval.get('return_code', 0)

        # Copy results out of the managed dict before the Manager shuts down.
        bids_dir = retval.get('bids_dir')
        output_dir = retval.get('output_dir')
        work_dir = retval.get('work_dir')
        plugin_settings = retval.get('plugin_settings', None)
        subject_list = retval.get('subject_list', None)
        fmriprep_wf = retval.get('workflow', None)
        run_uuid = retval.get('run_uuid', None)

    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    if fmriprep_wf and opts.write_graph:
        fmriprep_wf.write_graph(graph2use="colored", format='svg', simple_form=True)

    retcode = retcode or int(fmriprep_wf is None)
    if retcode != 0:
        sys.exit(retcode)

    # Check workflow for missing commands
    missing = check_deps(fmriprep_wf)
    if missing:
        print("Cannot run fMRIPrep. Missing dependencies:")
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface))
        sys.exit(2)

    # Clean up master process before running workflow, which may create forks
    gc.collect()

    # Sentry tracking
    if not opts.notrack:
        with sentry_sdk.configure_scope() as scope:
            if run_uuid:
                scope.set_tag('run_uuid', run_uuid)
            if subject_list:
                scope.set_tag('npart', len(subject_list))
        sentry_sdk.add_breadcrumb(message='fMRIPrep started', level='info')
        sentry_sdk.capture_message('fMRIPrep started', level='info')

    try:
        fmriprep_wf.run(**plugin_settings)
    except RuntimeError as e:
        errno = 1
        # "did not execute cleanly" failures are reported per-node elsewhere.
        if "Workflow did not execute cleanly" not in str(e):
            sentry_sdk.capture_exception(e)
        raise
    else:
        if opts.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any
            dseg_tsv = str(api.get('fsaverage', suffix='dseg', extensions=['.tsv']))
            _copy_any(dseg_tsv,
                      str(Path(output_dir) / 'fmriprep' / 'desc-aseg_dseg.tsv'))
            _copy_any(dseg_tsv,
                      str(Path(output_dir) / 'fmriprep' / 'desc-aparcaseg_dseg.tsv'))
        logger.log(25, 'fMRIPrep finished without errors')
    finally:
        # Generate reports phase
        errno += generate_reports(subject_list, output_dir, work_dir, run_uuid,
                                  sentry_sdk=sentry_sdk)
        write_derivative_description(bids_dir, str(Path(output_dir) / 'fmriprep'))

    if not opts.notrack and errno == 0:
        sentry_sdk.capture_message('fMRIPrep finished without errors', level='info')
    sys.exit(int(errno > 0))
def test_sentry(self):
    """Deliberately trigger a ZeroDivisionError and report it to Sentry.

    Used to confirm that the configured DSN actually receives events.
    """
    try:
        1 / 0
    except ZeroDivisionError as exc:
        # Forward the synthetic error so it shows up in the Sentry project.
        sentry_sdk.capture_exception(exc)
def wrapper(*args, **kwargs):
    """Call the wrapped ``func``, logging and reporting any exception.

    Returns ``func``'s result on success; after an exception the error is
    logged, forwarded to Sentry, and swallowed (implicitly returning None).
    """
    try:
        result = func(*args, **kwargs)
    except Exception as exc:
        logging.exception(str(exc))
        capture_exception(exc)
    else:
        return result
def request_science_topic_response(vars):
    """Build a response proposing a science topic to chat about.

    Picks a random not-yet-used science topic, stores it in shared memory,
    and returns an acknowledgement + invitation string. Falls back to an
    apology when every topic has been used, and to ``error_response`` when
    no link-to/science pattern matched or anything raised.
    """
    try:
        # get_unused_topics
        science_topics_names = local_utils.get_unused_topics(vars)
        if not science_topics_names:
            # All topics exhausted: acknowledge and bow out gracefully.
            state_utils.set_can_continue(vars, MUST_CONTINUE)
            state_utils.set_confidence(vars, confidence=CONF_100)
            next_index = state_utils.get_unrepeatable_index_from_rand_seq(
                vars,
                "nice_chat_acks",
                len(NICE_CHAT_ACKS),
                True,
            )
            ack = f"{NICE_CHAT_ACKS[next_index]}"
            body = (
                "Okay, There are many scientific topics that could be discussed, "
                "when I learn something new I will be ready to talk to you about it."
            )
            return " ".join([ack, body])
        # NOTE(review): this fallback is unreachable — the empty case
        # returned above, so science_topics_names is always truthy here.
        science_topics_names = science_topics_names if science_topics_names else list(
            science_topics.keys())
        current_topic = random.sample(science_topics_names, 1)[0]
        local_utils.add_unused_topics(vars, current_topic)
        # save is_requested_topic_before
        shared_memory = state_utils.get_shared_memory(vars)
        is_requested_topic_before = shared_memory.get(
            "is_requested_topic_before")
        state_utils.save_to_shared_memory(vars,
                                          current_topic=current_topic,
                                          is_requested_topic_before=True)
        if is_requested_topic_before:
            # Repeat visit: use a varied acknowledgement from the rotation.
            next_index = state_utils.get_unrepeatable_index_from_rand_seq(
                vars,
                "nice_chat_acks",
                len(NICE_CHAT_ACKS),
                True,
            )
            ack = f"{NICE_CHAT_ACKS[next_index]}"
            body = f"So, maybe next? Do you wanna talk about {current_topic}?"
        else:
            ack = "I think people who are interested in science are special."
            body = f"I like to talk about a variety of scientific topics. Do you wanna talk about {current_topic}?"
        state_utils.add_acknowledgement_to_response_parts(vars)
        # Confidence depends on how strongly the user signalled interest.
        if linkto_yes(vars):
            state_utils.set_can_continue(vars, MUST_CONTINUE)
            state_utils.set_confidence(vars, confidence=CONF_100)
        elif if_chat_about_science_topic_pattern(vars):
            state_utils.set_can_continue(vars, CAN_CONTINUE_SCENARIO)
            state_utils.set_confidence(vars, confidence=CONF_95)
        elif is_mentioned_science_pattern(vars):
            state_utils.set_can_continue(vars, CAN_CONTINUE_SCENARIO)
            state_utils.set_confidence(vars, confidence=CONF_95)
        else:
            return error_response(vars)
        return " ".join([ack, body])
    except Exception as exc:
        # Any failure is reported and converted to the skill's error response.
        logger.exception(exc)
        sentry_sdk.capture_exception(exc)
        return error_response(vars)
def oauth_handler(blueprint, token):
    """
    Handles incoming OAuth events, login, signup.

    :param blueprint: the Flask-Dance provider blueprint (github/google/facebook)
    :param token: the OAuth token dict, or None when authorization failed
    :return: always False — presumably to suppress the framework's default
             token storage (Flask-Dance convention); TODO confirm
    """
    if token is None:
        # Failed
        logger.info("Failed to log in with {}.".format(blueprint.name))
        flash(_("Error logging in"))
        return False
    try:
        # Fetch the provider-specific user-info endpoint.
        if blueprint.name == "github":
            response = blueprint.session.get("/user")
        elif blueprint.name == "google":
            response = blueprint.session.get("/plus/v1/people/me")
        elif blueprint.name == "facebook":
            response = blueprint.session.get("/me?fields=email")
        else:
            logger.critical("Missing blueprint handler for {}".format(blueprint.name))
            flash(_("Error logging in"))
            return False
    except ValueError as e:
        sentry_sdk.capture_exception(e)
        flash(_("Error logging in"))
        return False
    if not response.ok:
        # Failed
        logger.info("Failed to fetch user info from {}.".format(blueprint.name))
        logger.info(response)
        flash(_("Error logging in"))
        return False
    response = response.json()
    oauth_user_id = response["id"]  # Get user ID
    try:
        # Check if existing service link
        authentication_link = AuthLinks.query.filter_by(
            provider=blueprint.name,
            provider_user_id=str(oauth_user_id),
        ).one()
    except NoResultFound:
        # New service link, at least store the token (not yet committed).
        authentication_link = AuthLinks(
            provider=blueprint.name,
            provider_user_id=str(oauth_user_id),
            token=token["access_token"],
        )
        logger.info("User not found, keeping token in memory")
    except Exception as e:
        # Failure in query!
        sentry_sdk.capture_exception(e)
        logger.error("Failed querying authentication links")
        flash(_("That account is not linked to any system account, check if you already have an account."))
        return False
    # Link exists and it is associated with an user
    if authentication_link is not None and authentication_link.user_id is not None:
        login_user(User.query.get(authentication_link.user_id))
        db.session.commit()
        logger.info("Successfully signed in with {}.".format(blueprint.name))
        return False
    elif authentication_link is not None and \
            authentication_link.user_id is None and \
            "user_id" in session.keys():
        # Logged-in user is linking this provider to their existing account.
        try:
            authentication_link.user_id = int(session["user_id"])  # Update link with user id
            db.session.add(authentication_link)
            db.session.commit()
            return False
        except Exception as e:
            db.session.rollback()
            sentry_sdk.capture_exception(e)
            logger.error("Could not store user and oauth link")
            flash(_("Error signing up, please try again"))
            return False
    else:
        # Link does not exist or not associated
        if "oauth_sign_up" in session.keys() and \
                session["oauth_sign_up"]:
            # If registration
            session["oauth_sign_up"] = False
            # Extract an email address from whichever shape the provider used.
            if "email" in response.keys():
                user_email = response["email"]
            else:
                if "emails" in response.keys() and len(response["emails"]) > 0:
                    user_email = response["emails"][0]["value"]
                else:
                    user_email = None
            if "name" in response.keys():
                if blueprint.name == "google":
                    if "givenName" in response["name"].keys():
                        user_name = response["name"]["givenName"]
                    else:
                        logger.info("Google user does not have a givenName")
                        flash(_("Error signing up"))
                        return False
                else:
                    user_name = response["name"]
            else:
                logger.info("User does not have a name!")
                flash(_("Error signing up"))
                return False
            if user_email is None or \
                    len(user_email) < len("*****@*****.**") or \
                    "@" not in user_email:
                # I'll assume noone with their own TLD will use this
                logger.info("User email is wrong or missing, trying other API endpoint")
                try:
                    if blueprint.name == "github":
                        # If we're authenticating against GitHub then we have to do
                        # another get
                        response = blueprint.session.get("/user/emails")
                        if not response.ok:
                            flash(_("Error signing up"))
                            logger.info("Error requesting email addresses")
                            return False
                        else:
                            response = response.json()
                            if len(response) > 0 and "email" in response[0].keys():
                                user_email = response[0]["email"]
                            else:
                                user_email = None
                            # Take the first email
                            if not response[0]["verified"] or \
                                    user_email is None or \
                                    len(user_email) < len("*****@*****.**") or \
                                    "@" not in user_email:
                                flash(_(
                                    "You have no associated email addresses with your account or none of them are valid"))
                                logger.error("User does not have any emails or none of them are valid")
                                return False
                            else:
                                pass  # All is okay again
                            pass  # New email is fine
                    else:
                        logger.info("No email addresses associated with the account")
                        flash(_("You have no associated email addresses with that account"))
                        return False
                except Exception:
                    logger.info("Error asking for another emails")
                    flash(_("Error signing up"))
                    return False
            else:
                pass  # Email is okay
            try:
                # Check if existing service link
                User.query.filter(User.email == user_email).one()
                flash(_("This email address is in use, you must log in with your password to link {provider}"
                        .format(provider=blueprint.name)))
                logger.debug("Email address is in use, but not linked, to avoid hijacks the user must login")
                return False
            except NoResultFound:
                # Do not allow same email to sign up again
                pass
            # Create the account with an unguessable random password.
            user = User(
                email=user_email,
                username=user_name,
                password=hash_password(token_bytes(100)),
                active=True
            )
            flash(_("Password is set randomly, use \"Forgot password\" to set another password"))
            try:
                db.session.add(user)
                # Populate User's ID first by committing
                db.session.commit()
            except Exception as e:
                db.session.rollback()
                sentry_sdk.capture_exception(e)
                logger.error("Could not store user and oauth link")
                flash(_("Error signing up"))
                return False
            try:
                authentication_link.user_id = user.id  # Update link with user id
                db.session.add(authentication_link)
                db.session.commit()
            except Exception as e:
                db.session.rollback()
                sentry_sdk.capture_exception(e)
                logger.error("Could not store user and oauth link")
                flash(_("Error signing up"))
                return False
            login_user(user)
            db.session.commit()
            logger.info("Successfully signed up with {}.".format(blueprint.name))
            return False
        else:
            logger.debug("User does not wish to sign up")
            flash(_("You do not have an account"))
            return False
def report_exception(self, exception):
    """Forward the given exception to Sentry and return the resulting value."""
    event_id = sentry_sdk.capture_exception(exception)
    return event_id
def run_main(storage_path, skip_cert_verify=False, without_downloading_files=False):
    """Run one Moodle-Downloader pass: load config, fetch changes, download,
    and notify via console/mail. Exits the process with -1 on fatal errors.

    :param storage_path: directory holding config, log file and downloads
    :param skip_cert_verify: disable TLS certificate verification
    :param without_downloading_files: record changes but fake the downloads
    """
    logging.basicConfig(
        filename=os.path.join(storage_path, 'MoodleDownloader.log'),
        level=logging.DEBUG,
        format='%(asctime)s %(levelname)s {%(module)s} %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    logging.info('--- main started ---------------------')
    Log.info('Moodle Downloader starting...')
    if IS_DEBUG:
        logging.info(
            'Debug-Mode detected. Errors will not be logged but instead' +
            ' re-risen.')
        debug_logger = logging.getLogger()
        debug_logger.setLevel(logging.ERROR)
        debug_logger.addHandler(ReRaiseOnError())
    try:
        logging.debug('Loading config...')
        Log.debug('Loading config...')
        config = ConfigHelper(storage_path)
        config.load()
    except BaseException as e:
        logging.error('Error while trying to load the Configuration! ' +
                      'Exiting...', extra={'exception': e})
        Log.error('Error while trying to load the Configuration!')
        sys.exit(-1)

    # Sentry is optional: only enabled when a DSN is configured.
    r_client = False
    try:
        sentry_dsn = config.get_property('sentry_dsn')
        if sentry_dsn:
            sentry_sdk.init(sentry_dsn)
            # BUG FIX: r_client was never set to True, so the
            # `if r_client: sentry_sdk.capture_exception(e)` in the error
            # path below never ran and crashes were never reported.
            r_client = True
    except BaseException:
        pass

    mail_service = MailService(config)
    console_service = ConsoleService(config)
    try:
        moodle = MoodleService(config, storage_path, skip_cert_verify)
        logging.debug(
            'Checking for changes for the configured Moodle-Account....')
        Log.debug('Checking for changes for the configured Moodle-Account...')
        changed_courses = moodle.fetch_state()
        diff_count = 0
        logging.debug('Start downloading changed files...')
        Log.debug('Start downloading changed files...')
        if without_downloading_files:
            downloader = FakeDownloadService(changed_courses, moodle, storage_path)
        else:
            downloader = DownloadService(changed_courses, moodle, storage_path)
        downloader.run()
        changed_courses_to_notify = moodle.recorder.changes_to_notify()
        for course in changed_courses:
            diff_count += len(course.files)
        if diff_count > 0:
            logging.info(
                '%s changes found for the configured Moodle-Account.'
                % (diff_count))
            Log.success('%s changes found for the configured Moodle-Account.'
                        % (diff_count))
            console_service.notify_about_changes_in_moodle(changed_courses)
        else:
            logging.info('No changes found for the configured Moodle-Account.')
            Log.warning('No changes found for the configured Moodle-Account.')
        if len(changed_courses_to_notify) > 0:
            mail_service.notify_about_changes_in_moodle(
                changed_courses_to_notify)
            moodle.recorder.notified(changed_courses_to_notify)
        logging.debug('All done. Exiting...')
        Log.success('All done. Exiting..')
    except BaseException as e:
        error_formatted = traceback.format_exc()
        logging.error(error_formatted, extra={'exception': e})
        if r_client:
            sentry_sdk.capture_exception(e)
        mail_service.notify_about_error(str(e))
        logging.debug('Exception-Handling completed. Exiting...',
                      extra={'exception': e})
        Log.critical('Exception:\n%s' % (error_formatted))
        Log.error('The following error occurred during execution: %s'
                  % (str(e)))
        sys.exit(-1)
def proccess_create_or_modify_user_request(
        attribute_dict,
        organisation=None,
        allow_existing_user_modify=False,
        is_self_sign_up=False,
        modify_only=False,
):
    """
    Takes a create or modify user request and determines the response. Normally what's in the top level API function,
    but here it's one layer down because there's multiple entry points for 'create user':
    - The admin api
    - The register api

    :param attribute_dict: attributes that can be supplied by the request maker
    :param organisation: what organisation the request maker belongs to. The created user is bound to the same org
    :param allow_existing_user_modify: whether to return and error when the user already exists for the supplied IDs
    :param is_self_sign_up: does the request come from the register api?
    :param modify_only: only modify an existing user (looked up by user_id); 404 if not found
    :return: a (response_dict, http_status_code) tuple
    """
    if not attribute_dict.get('custom_attributes'):
        attribute_dict['custom_attributes'] = {}
    user_id = attribute_dict.get('user_id')
    email = attribute_dict.get('email')
    phone = attribute_dict.get('phone')
    referred_by = attribute_dict.get('referred_by')
    blockchain_address = attribute_dict.get('blockchain_address')
    provided_public_serial_number = attribute_dict.get('public_serial_number')
    # A "public serial number" that checksums as an Ethereum address is
    # treated as the blockchain address instead of a transfer-card id.
    if not blockchain_address and provided_public_serial_number:
        try:
            blockchain_address = to_checksum_address(
                provided_public_serial_number)
            # Since it's actually an ethereum address set the provided public serial number to None
            # so it doesn't get used as a transfer card
            provided_public_serial_number = None
        except Exception:
            pass
    require_transfer_card_exists = attribute_dict.get(
        'require_transfer_card_exists', g.active_organisation.require_transfer_card)
    public_serial_number = (provided_public_serial_number
                            or attribute_dict.get('payment_card_qr_code')
                            or attribute_dict.get('payment_card_barcode'))
    location = attribute_dict.get('location')  # address location
    geo_location = attribute_dict.get(
        'geo_location')  # geo location as str of lat, lng
    if geo_location:
        # assumes "lat lng" separated by a single space — TODO confirm format
        geo = geo_location.split(' ')
        lat = geo[0]
        lng = geo[1]
    else:
        # TODO: Work out how this passed tests when this wasn't definied properly!?!
        lat = None
        lng = None
    use_precreated_pin = attribute_dict.get('use_precreated_pin')
    use_last_4_digits_of_id_as_initial_pin = attribute_dict.get(
        'use_last_4_digits_of_id_as_initial_pin')
    transfer_account_name = attribute_dict.get('transfer_account_name')
    first_name = attribute_dict.get('first_name')
    last_name = attribute_dict.get('last_name')
    business_usage_name = attribute_dict.get('business_usage_name')
    business_usage_id = None
    if business_usage_name:
        usage = TransferUsage.find_or_create(business_usage_name)
        business_usage_id = usage.id
    preferred_language = attribute_dict.get('preferred_language')
    primary_user_identifier = attribute_dict.get('primary_user_identifier')
    primary_user_pin = attribute_dict.get('primary_user_pin')
    initial_disbursement = attribute_dict.get('initial_disbursement', None)
    is_vendor = attribute_dict.get('is_vendor', None)
    if is_vendor is None:
        is_vendor = attribute_dict.get('vendor', False)
    is_tokenagent = attribute_dict.get('is_tokenagent', False)
    is_groupaccount = attribute_dict.get('is_groupaccount', False)
    # is_beneficiary defaults to the opposite of is_vendor
    is_beneficiary = attribute_dict.get(
        'is_beneficiary',
        not is_vendor and not is_tokenagent and not is_groupaccount)
    if current_app.config['IS_USING_BITCOIN']:
        try:
            base58.b58decode_check(blockchain_address)
        except ValueError:
            response_object = {
                'message':
                'Blockchain Address {} Not Valid'.format(blockchain_address)
            }
            return response_object, 400
    if isinstance(phone, bool):
        phone = None
    if phone and not is_self_sign_up:
        # phone has already been parsed if self sign up
        try:
            phone = proccess_phone_number(phone)
        except NumberParseException as e:
            response_object = {'message': 'Invalid Phone Number: ' + str(e)}
            return response_object, 400
    # Work out if there's an existing transfer account to bind to
    existing_transfer_account = None
    if primary_user_identifier:
        primary_user = find_user_from_public_identifier(
            primary_user_identifier)
        # NOTE(review): this first check conflates "not found" and "bad PIN"
        # into the same 'Primary User not Found' message, and makes the
        # dedicated 'Invalid PIN' branch below unreachable — confirm intent.
        if not primary_user or not primary_user.verify_password(
                primary_user_pin):
            response_object = {'message': 'Primary User not Found'}
            return response_object, 400
        if not primary_user.verify_password(primary_user_pin):
            response_object = {'message': 'Invalid PIN for Primary User'}
            return response_object, 400
        primary_user_transfer_account = primary_user.transfer_account
        if not primary_user_transfer_account:
            response_object = {
                'message': 'Primary User has no transfer account'
            }
            return response_object, 400
        # NOTE(review): primary_user_transfer_account is validated but never
        # assigned to existing_transfer_account, so the binding implied by the
        # comment above never happens — verify against callers.
    if not (phone or email or public_serial_number or blockchain_address):
        response_object = {'message': 'Must provide a unique identifier'}
        return response_object, 400
    if use_precreated_pin and not public_serial_number:
        response_object = {
            'message':
            'Must provide public serial number to use a transfer card or pre-created pin'
        }
        return response_object, 400
    if public_serial_number:
        public_serial_number = str(public_serial_number)
        if use_precreated_pin or require_transfer_card_exists:
            transfer_card = TransferCard.query.filter_by(
                public_serial_number=public_serial_number).first()
            if not transfer_card:
                response_object = {'message': 'Transfer card not found'}
                return response_object, 400
    business_usage = None
    if business_usage_id:
        business_usage = TransferUsage.query.get(business_usage_id)
        if not business_usage:
            response_object = {
                'message':
                f'Business Usage not found for id {business_usage_id}'
            }
            return response_object, 400
    referred_by_user = find_user_from_public_identifier(referred_by)
    if referred_by and not referred_by_user:
        response_object = {
            'message':
            f'Referrer user not found for public identifier {referred_by}'
        }
        return response_object, 400
    existing_user = find_user_from_public_identifier(email, phone,
                                                     public_serial_number,
                                                     blockchain_address)
    if modify_only:
        existing_user = User.query.get(user_id)
    if modify_only and existing_user is None:
        response_object = {'message': 'User not found'}
        return response_object, 404
    if existing_user:
        if not allow_existing_user_modify:
            response_object = {'message': 'User already exists for Identifier'}
            return response_object, 400
        # Update path: mutate the existing user in place and commit.
        try:
            user = update_transfer_account_user(
                existing_user,
                first_name=first_name,
                last_name=last_name,
                preferred_language=preferred_language,
                phone=phone,
                email=email,
                location=location,
                public_serial_number=public_serial_number,
                use_precreated_pin=use_precreated_pin,
                existing_transfer_account=existing_transfer_account,
                is_beneficiary=is_beneficiary,
                is_vendor=is_vendor,
                is_tokenagent=is_tokenagent,
                is_groupaccount=is_groupaccount,
                business_usage=business_usage)
            if referred_by_user:
                user.referred_by.clear(
                )  # otherwise prior referrals will remain...
                user.referred_by.append(referred_by_user)
            set_custom_attributes(attribute_dict, user)
            flag_modified(user, "custom_attributes")
            db.session.commit()
            response_object = {
                'message': 'User Updated',
                'data': {
                    'user': user_schema.dump(user).data
                }
            }
            return response_object, 200
        except Exception as e:
            response_object = {'message': str(e)}
            return response_object, 400
    # Create path: build a brand-new user and transfer account.
    user = create_transfer_account_user(
        first_name=first_name,
        last_name=last_name,
        preferred_language=preferred_language,
        phone=phone,
        email=email,
        public_serial_number=public_serial_number,
        organisation=organisation,
        blockchain_address=blockchain_address,
        transfer_account_name=transfer_account_name,
        lat=lat,
        lng=lng,
        use_precreated_pin=use_precreated_pin,
        use_last_4_digits_of_id_as_initial_pin=
        use_last_4_digits_of_id_as_initial_pin,
        existing_transfer_account=existing_transfer_account,
        is_beneficiary=is_beneficiary,
        is_vendor=is_vendor,
        is_tokenagent=is_tokenagent,
        is_groupaccount=is_groupaccount,
        is_self_sign_up=is_self_sign_up,
        business_usage=business_usage,
        initial_disbursement=initial_disbursement)
    if referred_by_user:
        user.referred_by.append(referred_by_user)
    if attribute_dict.get('gender'):
        attribute_dict['custom_attributes']['gender'] = attribute_dict.get(
            'gender')
    if attribute_dict.get('bio'):
        attribute_dict['custom_attributes']['bio'] = attribute_dict.get('bio')
    set_custom_attributes(attribute_dict, user)
    if is_self_sign_up and attribute_dict.get('deviceInfo', None) is not None:
        save_device_info(device_info=attribute_dict.get('deviceInfo'),
                         user=user)
    # Location fires an async task that needs to know user ID
    db.session.flush()
    if location:
        user.location = location
    if phone:
        if is_self_sign_up:
            send_one_time_code(phone=phone, user=user)
            return {
                'message': 'User Created. Please verify phone number.',
                'otp_verify': True
            }, 200
        elif current_app.config['ONBOARDING_SMS']:
            # Best-effort: SMS failures are reported but don't fail signup.
            try:
                send_onboarding_sms_messages(user)
            except Exception as e:
                print(e)
                sentry_sdk.capture_exception(e)
                pass
    response_object = {
        'message': 'User Created',
        'data': {
            'user': user_schema.dump(user).data
        }
    }
    return response_object, 200
def not_allowed(e):
    """403 error handler: report to Sentry, respond with a JSON error body."""
    capture_exception(e)
    body = jsonify(error=str(e))
    return body, 403
def send_printer_notifications(
    self,
    notification_type: str,
    notification_data: dict,
    printer: Printer,
    print_: Optional[Print],
    extra_context: Optional[Dict] = None,
    plugin_names: Tuple[str, ...] = (),
    fail_silently: bool = True,
) -> None:
    """Fan a printer event out to every matching notification plugin.

    :param notification_type: event type; mapped to a plugin feature flag
    :param notification_data: event payload passed through to plugins
    :param printer: the printer the event belongs to
    :param print_: the related print, if any (used for the poster image)
    :param extra_context: additional values merged into the plugin context
    :param plugin_names: restrict delivery to these plugins (empty = all)
    :param fail_silently: log-and-continue on plugin errors instead of raising
    """
    feature = self.feature_for_notification_type(notification_type, notification_data)
    if not feature:
        return

    if plugin_names:
        names = list(set(self.notification_plugin_names()) & set(plugin_names))
    else:
        names = self.notification_plugin_names()

    # select matching, enabled & configured
    nsettings = list(NotificationSetting.objects.filter(
        user_id=printer.user_id,
        enabled=True,
        name__in=names,
        **{feature.name: True}
    ))

    if not nsettings:
        LOGGER.debug("no matching NotificationSetting objects, ignoring printer notification")
        return

    # Prefer the print's poster image; fall back to a fresh snapshot.
    if print_ and print_.poster_url:
        img_url = print_.poster_url
    else:
        img_url = get_rotated_jpg_url(printer, force_snapshot=True)

    user_ctx = self.get_user_context(printer.user)
    printer_ctx = self.get_printer_context(printer)
    print_ctx = self.get_print_context(print_)

    for nsetting in nsettings:
        # BUG FIX: the f-string previously interpolated the literal text
        # "notification_type" instead of the variable's value.
        LOGGER.debug(f'forwarding event {notification_type} to plugin "{nsetting.name}" (pk: {nsetting.pk})')
        try:
            plugin = self.notification_plugin_by_name(nsetting.name)
            if not plugin:
                continue

            context = PrinterNotificationContext(
                feature=feature,
                config=nsetting.config,
                user=user_ctx,
                printer=printer_ctx,
                print=print_ctx,
                notification_type=notification_type,
                notification_data=notification_data,
                extra_context=extra_context or {},
                img_url=img_url,
            )

            self._send_printer_notification(nsetting=nsetting, context=context)
        except NotImplementedError:
            # Plugin doesn't support this feature — skip quietly.
            pass
        except Exception:
            if fail_silently:
                LOGGER.exception('send_printer_notification plugin error')
                capture_exception()
            else:
                raise
def capture_exception(*args, **kwargs):
    """Report an exception to Sentry when enabled; otherwise print the traceback.

    BUG FIX: the arguments were previously discarded and
    ``sentry_sdk.capture_exception()`` was called bare, so an explicitly
    passed exception (``capture_exception(e)`` — the dominant call pattern
    in this codebase) was ignored in favour of ``sys.exc_info()``, which is
    empty outside an ``except`` block. Forward them instead.
    """
    if is_sentry_enabled():
        sentry_sdk.capture_exception(*args, **kwargs)
    else:
        print_exc()
def input_error(e):
    """406 error handler: report to Sentry, respond with a JSON error body."""
    capture_exception(e)
    body = jsonify(error=str(e))
    return body, 406
def handle(self, data, address):
    """Dispatch one incoming datagram to the private handler.

    Any unexpected error is forwarded to Sentry (from the active
    ``sys.exc_info``) instead of crashing the server loop.
    """
    try:
        self.__handle(data, address)
    except Exception:
        sentry_sdk.capture_exception()
def internal_error(e):
    """503 error handler: report to Sentry, respond with a JSON error body."""
    capture_exception(e)
    body = jsonify(error=str(e))
    return body, 503
def post(self, request):
    """Sign-up endpoint: create a User from an email or mobile 'channel'.

    Returns 201 with access/refresh tokens and the serialized user on
    success; a generic 400 on any validation failure or exception.
    """
    try:
        first_name = request.data.get("first_name", "User")
        last_name = request.data.get("last_name", "")
        # NOTE(review): .strip() raises AttributeError when "channel" is
        # absent (None) — caught by the broad except below, which still
        # returns 400 but also reports a spurious exception to Sentry.
        channel = request.data.get("channel").strip().lower()
        password = request.data.get("password")

        if not channel or not password:
            return Response(
                {
                    "error": "Something went wrong. Please try again later or contact the support team."
                },
                status=status.HTTP_400_BAD_REQUEST,
            )

        # Placeholder value for whichever identifier field is unused.
        random_string = uuid.uuid4().hex

        # Classify the channel as an email address or a phone number.
        if check_valid_email_address(channel.strip().lower()):
            medium = "email"
        elif check_valid_phone_number(channel.strip().lower()):
            medium = "mobile"
        else:
            capture_message("Invalid medium")
            return Response(
                {
                    "error": "Something went wrong. Please try again later or contact the support team."
                },
                status=status.HTTP_400_BAD_REQUEST,
            )

        if medium == "email":
            # https://docs.djangoproject.com/en/3.1/ref/models/querysets/#exists
            if User.objects.filter(
                email=channel.strip().lower()
            ).exists():
                return Response(
                    {
                        "error": "This email address is already taken. Please try another one."
                    },
                    status=status.HTTP_400_BAD_REQUEST,
                )
            username = uuid.uuid4().hex
            user = User(
                username=username,
                email=channel.strip().lower(),
                first_name=first_name,
                last_name=last_name,
                mobile_number=random_string,
            )
        elif medium == "mobile":
            if User.objects.filter(
                mobile_number=channel
            ).exists():
                return Response(
                    {
                        "error": "This mobile number is already taken. Please try another one."
                    },
                    status=status.HTTP_400_BAD_REQUEST,
                )
            username = uuid.uuid4().hex
            user = User(
                username=username,
                email=random_string,
                first_name=first_name,
                last_name=last_name,
                mobile_number=channel.strip().lower(),
            )
        else:
            # incase this inbound (defensive: unreachable given the branches above)
            capture_message("Sign up endpoint wrong medium data")
            user = None
            return Response(
                {
                    "error": "Something went wrong. Please try again later or contact the support team."
                },
                status=status.HTTP_400_BAD_REQUEST,
            )

        # Record login metadata alongside the freshly created account.
        user.set_password(password)
        user.last_active = timezone.now()
        user.last_login_time = timezone.now()
        user.last_login_ip = request.META.get("REMOTE_ADDR")
        user.last_login_medium = medium
        user.last_login_uagent = request.META.get("HTTP_USER_AGENT")
        user.token_updated_at = timezone.now()
        user.save()

        serialized_user = UserSerializer(user).data
        access_token, refresh_token = get_tokens_for_user(user)
        data = {
            "access_token": access_token,
            "refresh_token": refresh_token,
            "user": serialized_user,
        }
        return Response(data, status=status.HTTP_201_CREATED)
    except Exception as e:
        # Catch-all: report and return a generic failure message.
        print(e)
        capture_exception(e)
        return Response(
            {
                "error": "Something went wrong. Please try again later or contact the support team."
            },
            status=status.HTTP_400_BAD_REQUEST,
        )
async def _on_command_error(self, ctx, error, bypass=False):
    """Central command-error handler: translate known errors into user-facing
    messages, report unknown ones to Sentry (or stderr), and reset cooldowns.
    """
    # NOTE(review): due to operator precedence this reads as
    # `A or (B and not bypass)` — `bypass` does not bypass the
    # `on_error` check (A). Confirm whether `(A or B) and not bypass`
    # was intended.
    if (hasattr(ctx.command, "on_error") or (ctx.command and hasattr(
            ctx.cog, f"_{ctx.command.cog_name}__error")) and not bypass):
        # Do nothing if the command/cog has its own error handler
        return
    if isinstance(error, commands.CommandNotFound):
        return
    elif isinstance(error, commands.MissingRequiredArgument):
        await ctx.send(
            _("Oops! You forgot a required argument: `{arg}`").format(
                arg=error.param.name))
    elif isinstance(error, commands.BadArgument):
        # Argument-conversion failures: pick the most specific message.
        if isinstance(error, NotInRange):
            await ctx.send(error.text)
        elif isinstance(error, UserHasNoChar):
            await ctx.send(
                _("The user you specified as a parameter does not have a"
                  " character."))
        elif isinstance(error, InvalidCrateRarity):
            await ctx.send(
                _("You did not enter a valid crate rarity. Possible ones are:"
                  " common, uncommon, rare, magic and legendary."))
        elif isinstance(error, InvalidCoinSide):
            await ctx.send(
                _("You did not enter a valid coin side. Please use `heads` or"
                  " `tails`."))
        elif isinstance(error, DateOutOfRange):
            await ctx.send(
                _("You entered a date that was out of range. It should be newer"
                  " than {date}.").format(date=error.min_))
        else:
            await ctx.send(_("You used a malformed argument!"))
    elif isinstance(error, GlobalCooldown):
        return await ctx.send(
            _("You are being rate-limited. Chill down, you can use a command"
              " again in {time}s.").format(
                  time=round(error.retry_after, 2)))
    elif isinstance(error, commands.CommandOnCooldown):
        return await ctx.send(
            _("You are on cooldown. Try again in {time}.").format(
                time=timedelta(seconds=int(error.retry_after))))
    elif hasattr(error, "original") and isinstance(error.original,
                                                   discord.HTTPException):
        return  # not our fault
    elif isinstance(error, commands.NotOwner):
        await ctx.send(embed=discord.Embed(
            title=_("Permission denied"),
            description=_(
                ":x: This command is only avaiable for the bot owner."),
            colour=0xFF0000,
        ))
    elif isinstance(error, commands.CheckFailure):
        # Game-specific check failures, each with its own explanation.
        if isinstance(error, utils.checks.NoCharacter):
            await ctx.send(_("You don't have a character yet."))
        elif isinstance(error, utils.checks.NeedsNoCharacter):
            await ctx.send(
                _("This command requires you to not have created a character yet."
                  " You already have one."))
        elif isinstance(error, utils.checks.NeedsGod):
            await ctx.send(
                _("You need to be following a god for this command. Please use"
                  " `{prefix}follow` to choose one.").format(
                      prefix=ctx.prefix))
        elif isinstance(error, utils.checks.NoGuild):
            await ctx.send(
                _("You need to have a guild to use this command."))
        elif isinstance(error, utils.checks.NeedsNoGuild):
            await ctx.send(
                _("You need to be in no guild to use this command."))
        elif isinstance(error, utils.checks.NoGuildPermissions):
            await ctx.send(
                _("Your rank in the guild is too low to use this command.")
            )
        elif isinstance(error, utils.checks.NeedsNoGuildLeader):
            await ctx.send(
                _("You mustn't be the owner of a guild to use this command."
                  ))
        elif isinstance(error, utils.checks.WrongClass):
            await ctx.send(embed=discord.Embed(
                title=_("Permission denied"),
                description=
                _(":x: You don't have the permissions to use this command. It"
                  " is thought for {error} class users.").format(
                      error=error),
                colour=0xFF0000,
            ))
        elif isinstance(error, utils.checks.NeedsNoAdventure):
            await ctx.send(
                _("You are already on an adventure. Use `{prefix}status` to see"
                  " how long it lasts.").format(prefix=ctx.prefix))
        elif isinstance(error, utils.checks.NeedsAdventure):
            await ctx.send(
                _("You need to be on an adventure to use this command. Try"
                  " `{prefix}adventure`!").format(prefix=ctx.prefix))
        elif isinstance(error, NeedsToBeInVoiceChat):
            await ctx.send(
                _("You need to be in a voice chat to use this command."))
        elif isinstance(error, VoteDidNotPass):
            await ctx.send(_("The vote did not pass."))
        elif isinstance(error, NeedsToBePlaying):
            await ctx.send(
                _("You need to be playing music, for example with `{prefix}play`,"
                  " to use this command.").format(prefix=ctx.prefix))
        elif isinstance(error, utils.checks.PetGone):
            # Losing the pet also strips the Ranger class line in the DB.
            await ctx.send(
                _("Your pet has gone missing. Maybe some aliens abducted it?"
                  " Since you can't find it anymore, you are no longer a"
                  " {profession}").format(profession=_("Ranger")))
            classes = ctx.character_data["class"]
            for evolve in ["Caretaker"
                           ] + ctx.bot.get_class_evolves()["Ranger"]:
                if evolve in classes:
                    idx = classes.index(evolve)
                    break
            classes[idx] = "No Class"
            async with self.bot.pool.acquire() as conn:
                await conn.execute(
                    'UPDATE profile SET "class"=$1 WHERE "user"=$2;',
                    classes,
                    ctx.author.id,
                )
        elif isinstance(error, utils.checks.PetDied):
            await ctx.send(
                _("Your pet **{pet}** died! You did not give it enough to eat or"
                  " drink. Because of your bad treatment, you are no longer a"
                  " {profession}.").format(pet=ctx.pet_data["name"],
                                           profession=_("Ranger")))
        elif isinstance(error, utils.checks.PetRanAway):
            await ctx.send(
                _("Your pet **{pet}** ran away! You did not show it your love"
                  " enough! Because of your bad treatment, you are no longer a"
                  " {profession}.").format(pet=ctx.pet_data["name"],
                                           profession=_("Ranger")))
        elif isinstance(error, utils.checks.NoPatron):
            await ctx.send(
                _("You need to be a {tier} tier donator to use this command."
                  " Please head to `{prefix}donate` and make sure you joined the"
                  " support server if you decide to support us.").format(
                      tier=error.tier.name.title(), prefix=ctx.prefix))
        elif isinstance(error, utils.checks.AlreadyRaiding):
            await ctx.send(
                _("There is another raid already ongoing. Try again at a later"
                  " time."))
        elif isinstance(error, utils.checks.NoCityOwned):
            await ctx.send(_("Your alliance does not own a city."))
        elif isinstance(error, utils.checks.CityOwned):
            await ctx.send(_("Your alliance already owns a city."))
        elif isinstance(error, utils.checks.NoAlliancePermissions):
            await ctx.send(_("Your alliance rank is too low."))
        elif isinstance(error, utils.checks.NoOpenHelpRequest):
            await ctx.send(
                _("Your server does not have an open help request."))
        else:
            await ctx.send(embed=discord.Embed(
                title=_("Permission denied"),
                description=
                _(":x: You don't have the permissions to use this command. It"
                  " is thought for other users."),
                colour=0xFF0000,
            ))
    elif isinstance(error, NoChoice):
        await ctx.send(_("You did not choose anything."))
    elif isinstance(error, commands.CommandInvokeError) and hasattr(
            error, "original"):
        # Unexpected failure inside the command body.
        if isinstance(
                error.original,
            (
                ClientOSError,
                ServerDisconnectedError,
                ContentTypeError,
                TimeoutError,
            ),
        ):
            # Called on 500 HTTP responses
            # TimeoutError: A Discord operation timed out. All others should be handled by us
            return
        elif isinstance(error.original, AsyncpgDataError):
            return await ctx.send(
                _("An argument or value you entered was far too high for me to"
                  " handle properly!"))
        elif isinstance(error.original, LookupError):
            await ctx.send(
                _("The languages have been reloaded while you were using a"
                  " command. The execution therefore had to be stopped. Please"
                  " try again."))
        if not SENTRY_SUPPORT:
            # No Sentry configured: dump the traceback to stderr instead.
            print("In {}:".format(ctx.command.qualified_name),
                  file=sys.stderr)
            traceback.print_tb(error.original.__traceback__)
            print(
                "{0}: {1}".format(error.original.__class__.__name__,
                                  error.original),
                file=sys.stderr,
            )
        else:
            # Re-raise to give Sentry a live exc_info, with rich context.
            try:
                raise error.original
            except Exception as e:
                if ctx.guild:
                    guild_id = ctx.guild.id
                else:
                    guild_id = "None"
                with sentry_sdk.push_scope() as scope:
                    scope.set_context("message",
                                      {"content": ctx.message.content})
                    scope.set_extra("guild_id", str(guild_id))
                    scope.set_extra("channel_id", str(ctx.channel.id))
                    scope.set_extra("message_id", str(ctx.message.id))
                    scope.set_extra("user_id", str(ctx.author.id))
                    scope.set_tag("command", ctx.command.qualified_name)
                    sentry_sdk.capture_exception(e)
            await ctx.send(
                _("The command you tried to use ran into an error. The incident"
                  " has been reported and the team will work hard to fix the"
                  " issue!"))
    # Always refund the command cooldown after a failed invocation.
    await ctx.bot.reset_cooldown(ctx)
    if ctx.command.parent:
        if (ctx.command.root_parent.name == "guild"
                and getattr(ctx, "character_data") is not None):
            await self.bot.reset_guild_cooldown(ctx)
        elif ctx.command.root_parent.name == "alliance":
            await self.bot.reset_alliance_cooldown(ctx)
def attendance_selection(self, event):
    """
    When a penny chat is shared, the user can click on "will attend" and "won't attend" buttons,
    and the resulting event is handled here.

    There are two side effects of this method:
    1. A Participant entry for the penny_chat and the user will be created or updated with the
       appropriate role.
    2. The organizer will be notified of "important" changes.
    3. The user will be told that the organizer will be notified.

    The specifics are rather complicated, but you can see how they work in
    bot.tests.processors.test_pennychat.test_PennyChatBotModule_attendance_selection
    """
    # Default to ATTENDEE; the "won't attend" button clears the role entirely.
    participant_role = Participant.ATTENDEE
    if event['actions'][0]['action_id'] == PENNY_CHAT_CAN_NOT_ATTEND:
        participant_role = None

    try:
        profile = get_or_create_social_profile_from_slack_id(
            event['user']['id'],
            slack_client=self.slack_client,
        )
        action_value = json.loads(event['actions'][0]['value'])
        penny_chat_id = action_value[PENNY_CHAT_ID]
        penny_chat = PennyChat.objects.get(pk=penny_chat_id)

        organizers = penny_chat.get_organizers()
        for organizer in organizers:
            if organizer == profile.user:
                # TODO notify user that it's silly to attend or not attend their own event
                return

        # create organizer notification message (even if we choose not to use it below)
        timestamp = int(penny_chat.date.astimezone(utc).timestamp())
        date_text = f'<!date^{timestamp}^{{date}} at {{time}}|{penny_chat.date}>'
        _not = '' if participant_role == Participant.ATTENDEE else ' _not_'
        notification = (
            f'<@{profile.slack_id}> will{_not} attend your Penny Chat "{penny_chat.title}" ({date_text})'
        )
        we_will_notify_organizer = 'Thank you. We will notify the organizer.'

        # Only notify when attendance actually changed (row created or deleted).
        changed = False
        if participant_role:
            participant, created = Participant.objects.update_or_create(
                user=profile.user, penny_chat=penny_chat, defaults={'role': participant_role}
            )
            if created:
                changed = True
        else:
            num_deleted, _ = Participant.objects.filter(user=profile.user, penny_chat=penny_chat).delete()
            if num_deleted > 0:
                changed = True

        if changed:
            # BUG FIX: this previously used the `organizer` variable leaked from the loop
            # above, which notified only the LAST organizer and raised NameError when the
            # chat had no organizers. Notify every organizer instead (no-op when empty).
            for organizer in organizers:
                organizer_profile = SocialProfile.objects.get(
                    user=organizer,
                    slack_team_id=penny_chat.created_from_slack_team_id,
                )
                self.slack_client.chat_postMessage(channel=organizer_profile.slack_id, text=notification)
            chat_postEphemeral_with_fallback(
                self.slack_client,
                channel=event['channel']['id'],
                user=profile.slack_id,
                text=we_will_notify_organizer,
            )
    except RuntimeError as e:
        # Best-effort: report to Sentry and tell the user to retry.
        capture_exception(e)
        chat_postEphemeral_with_fallback(
            self.slack_client,
            channel=event['channel']['id'],
            user=event['user']['id'],
            text="An error has occurred. Please try again in a moment.",
        )
        logging.exception('error in penny chat attendance selection')
def _deliver_subscription_report(subscription_id: int,
                                 previous_value: Optional[str] = None,
                                 invite_message: Optional[str] = None) -> None:
    """Render a subscription's insight assets and deliver them to its email or Slack target.

    :param subscription_id: pk of the Subscription to deliver.
    :param previous_value: when not None, this delivery was triggered by a target change
        ("new"/"invite" flow); only recipients added relative to ``previous_value`` are messaged.
    :param invite_message: optional message to include in invite emails.
    :raises NotImplementedError: for unsupported ``target_type`` values.
    """
    subscription = (Subscription.objects.prefetch_related(
        "dashboard__insights").select_related(
            "created_by",
            "insight",
            "dashboard",
        ).get(pk=subscription_id))

    is_new_subscription_target = False
    if previous_value is not None:
        # If previous_value is set we are triggering a "new" or "invite" message
        is_new_subscription_target = subscription.target_value != previous_value

        if not is_new_subscription_target:
            # Same value as before so nothing to do
            return

    if subscription.target_type == "email":
        insights, assets = generate_assets(subscription)

        # Send emails
        emails = subscription.target_value.split(",")
        if is_new_subscription_target:
            # Invite flow: only email recipients that were added by this change.
            previous_emails = previous_value.split(",") if previous_value else []
            emails = list(set(emails) - set(previous_emails))

        for email in emails:
            try:
                send_email_subscription_report(
                    email,
                    subscription,
                    assets,
                    # Parses as `(invite_message or "") if is_new... else None` —
                    # `or` binds tighter than the conditional expression.
                    invite_message=invite_message or "" if is_new_subscription_target else None,
                    total_asset_count=len(insights),
                )
                incr("subscription_email_send_success")
            except Exception as e:
                # Per-recipient failure: log + report, keep sending to the rest.
                logger.error(e)
                capture_exception(e)
                incr("subscription_email_send_failure")

    elif subscription.target_type == "slack":
        insights, assets = generate_assets(subscription)
        try:
            send_slack_subscription_report(subscription,
                                           assets,
                                           total_asset_count=len(insights),
                                           is_new_subscription=is_new_subscription_target)
            incr("subscription_slack_send_success")
        except Exception as e:
            incr("subscription_slack_send_failure")
            logger.error(e)
    else:
        raise NotImplementedError(
            f"{subscription.target_type} is not supported")

    if not is_new_subscription_target:
        # Regular scheduled deliveries advance the schedule; invite sends do not.
        subscription.set_next_delivery_date(subscription.next_delivery_date)
        subscription.save()
def main():
    """fMRIPrep command-line entry point.

    Parses CLI options, configures Sentry telemetry (unless ``--notrack``),
    validates the BIDS input and FreeSurfer license, builds the workflow in a
    subprocess, runs it, and always generates reports and derivative metadata
    before exiting. Exits non-zero on any failure.
    """
    from nipype import logging as nlogging
    from multiprocessing import set_start_method, Process, Manager
    from ..viz.reports import generate_reports
    from ..utils.bids import write_derivative_description

    # forkserver keeps worker processes from inheriting master-process state
    set_start_method('forkserver')
    warnings.showwarning = _warn_redirect
    opts = get_parser().parse_args()

    exec_env = os.name

    # special variable set in the container
    if os.getenv('IS_DOCKER_8395080871'):
        # The env var alone only proves "some container"; refine via cgroups.
        exec_env = 'singularity'
        cgroup = Path('/proc/1/cgroup')
        if cgroup.exists() and 'docker' in cgroup.read_text():
            exec_env = 'docker'
            if os.getenv('DOCKER_VERSION_8395080871'):
                exec_env = 'fmriprep-docker'

    sentry_sdk = None
    if not opts.notrack:
        import sentry_sdk
        from ..__about__ import __version__
        environment = "prod"
        release = __version__
        if not __version__:
            environment = "dev"
            release = "dev"
        elif bool(int(os.getenv('FMRIPREP_DEV', 0))) or ('+' in __version__):
            environment = "dev"

        def before_send(event, hints):
            # Filtering log messages about crashed nodes
            if 'logentry' in event and 'message' in event['logentry']:
                msg = event['logentry']['message']
                if msg.startswith("could not run node:"):
                    return None
                elif msg.startswith("Saving crash info to "):
                    return None
                elif re.match("Node .+ failed to run on host .+", msg):
                    return None

            # Group well-known environment failures under one fingerprint each
            if 'breadcrumbs' in event and isinstance(event['breadcrumbs'], list):
                fingerprints_to_propagate = ['no-disk-space', 'memory-error',
                                             'permission-denied', 'keyboard-interrupt']
                for bc in event['breadcrumbs']:
                    msg = bc.get('message', 'empty-msg')
                    if msg in fingerprints_to_propagate:
                        event['fingerprint'] = [msg]
                        break

            return event

        sentry_sdk.init("https://[email protected]/1137693",
                        release=release,
                        environment=environment,
                        before_send=before_send)
        with sentry_sdk.configure_scope() as scope:
            scope.set_tag('exec_env', exec_env)
            if exec_env == 'fmriprep-docker':
                scope.set_tag('docker_version', os.getenv('DOCKER_VERSION_8395080871'))
            free_mem_at_start = round(psutil.virtual_memory().free / 1024**3, 1)
            scope.set_tag('free_mem_at_start', free_mem_at_start)
            scope.set_tag('cpu_count', cpu_count())

            # Memory policy may have a large effect on types of errors experienced
            overcommit_memory = Path('/proc/sys/vm/overcommit_memory')
            if overcommit_memory.exists():
                policy = {'0': 'heuristic', '1': 'always', '2': 'never'}.get(
                    overcommit_memory.read_text().strip(), 'unknown')
                scope.set_tag('overcommit_memory', policy)
                if policy == 'never':
                    # BUG FIX: this previously re-read /proc/sys/vm/overcommit_memory,
                    # so the reported "limit" was the policy digit (e.g. "2kB"), never
                    # the actual kbytes limit. See proc(5) for overcommit_kbytes.
                    overcommit_kbytes = Path('/proc/sys/vm/overcommit_kbytes')
                    kb = overcommit_kbytes.read_text().strip()
                    if kb != '0':
                        limit = '{}kB'.format(kb)
                    else:
                        # kbytes of 0 means the ratio-based limit is in effect
                        overcommit_ratio = Path('/proc/sys/vm/overcommit_ratio')
                        limit = '{}%'.format(overcommit_ratio.read_text().strip())
                    scope.set_tag('overcommit_limit', limit)
                else:
                    scope.set_tag('overcommit_limit', 'n/a')
            else:
                scope.set_tag('overcommit_memory', 'n/a')
                scope.set_tag('overcommit_limit', 'n/a')

            for k, v in vars(opts).items():
                scope.set_tag(k, v)

    # Validate inputs
    if not opts.skip_bids_validation:
        print("Making sure the input data is BIDS compliant (warnings can be ignored in most "
              "cases).")
        validate_input_dir(exec_env, opts.bids_dir, opts.participant_label)

    # FreeSurfer license
    default_license = str(Path(os.getenv('FREESURFER_HOME')) / 'license.txt')
    # Precedence: --fs-license-file, $FS_LICENSE, default_license
    license_file = opts.fs_license_file or os.getenv('FS_LICENSE', default_license)
    if not os.path.exists(license_file):
        raise RuntimeError(
            'ERROR: a valid license file is required for FreeSurfer to run. '
            'FMRIPREP looked for an existing license file at several paths, in this '
            'order: 1) command line argument ``--fs-license-file``; 2) ``$FS_LICENSE`` '
            'environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. '
            'Get it (for free) by registering at https://'
            'surfer.nmr.mgh.harvard.edu/registration.html')
    os.environ['FS_LICENSE'] = license_file

    # Retrieve logging level
    log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
    # Set logging
    logger.setLevel(log_level)
    nlogging.getLogger('nipype.workflow').setLevel(log_level)
    nlogging.getLogger('nipype.interface').setLevel(log_level)
    nlogging.getLogger('nipype.utils').setLevel(log_level)

    errno = 0

    # Call build_workflow(opts, retval) in a subprocess so graph construction
    # doesn't bloat the master process that later forks workers.
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(opts, retval))
        p.start()
        p.join()

        if p.exitcode != 0:
            sys.exit(p.exitcode)

        fmriprep_wf = retval['workflow']
        plugin_settings = retval['plugin_settings']
        bids_dir = retval['bids_dir']
        output_dir = retval['output_dir']
        work_dir = retval['work_dir']
        subject_list = retval['subject_list']
        run_uuid = retval['run_uuid']

        if not opts.notrack:
            with sentry_sdk.configure_scope() as scope:
                scope.set_tag('run_uuid', run_uuid)
                scope.set_tag('npart', len(subject_list))

        retcode = retval['return_code']

    if fmriprep_wf is None:
        sys.exit(1)

    if opts.write_graph:
        fmriprep_wf.write_graph(graph2use="colored", format='svg', simple_form=True)

    if opts.reports_only:
        sys.exit(int(retcode > 0))

    if opts.boilerplate:
        sys.exit(int(retcode > 0))

    # Sentry tracking
    if not opts.notrack:
        sentry_sdk.add_breadcrumb(message='fMRIPrep started', level='info')
        sentry_sdk.capture_message('fMRIPrep started', level='info')

    # Check workflow for missing commands
    missing = check_deps(fmriprep_wf)
    if missing:
        print("Cannot run fMRIPrep. Missing dependencies:")
        for iface, cmd in missing:
            print("\t{} (Interface: {})".format(cmd, iface))
        sys.exit(2)

    # Clean up master process before running workflow, which may create forks
    gc.collect()
    try:
        fmriprep_wf.run(**plugin_settings)
    except RuntimeError as e:
        errno = 1
        # BUG FIX: guard for --notrack, where the local `sentry_sdk` is None —
        # previously this raised AttributeError and masked the real error.
        if sentry_sdk is not None and "Workflow did not execute cleanly" not in str(e):
            sentry_sdk.capture_exception(e)
        raise
    finally:
        # Generate reports phase — always runs, even when the workflow failed
        errno += generate_reports(subject_list, output_dir, work_dir, run_uuid,
                                  sentry_sdk=sentry_sdk)
        write_derivative_description(bids_dir, str(Path(output_dir) / 'fmriprep'))

    if not opts.notrack and errno == 0:
        sentry_sdk.capture_message('fMRIPrep finished without errors', level='info')
    sys.exit(int(errno > 0))
def _send_email(
    campaign_key: str,
    to: List[Dict[str, str]],
    subject: str,
    headers: Dict,
    txt_body: str = "",
    html_body: str = "",
    reply_to: Optional[str] = None,
) -> None:
    """
    Sends built email message asynchronously.

    One MessagingRecord per (recipient, campaign_key) makes delivery idempotent:
    recipients already marked ``sent_at`` are skipped. Records are row-locked
    (``select_for_update``) inside the transaction while messages are sent.

    :param campaign_key: identifies the campaign for deduplication.
    :param to: list of dicts with ``raw_email`` (dedup key) and ``recipient`` (To header).
    :param headers: extra SMTP headers for every message.
    :param reply_to: overrides ``settings.EMAIL_REPLY_TO`` when provided.
    """
    messages: List = []
    records: List = []

    with transaction.atomic():

        for dest in to:
            record, _ = MessagingRecord.objects.get_or_create(
                raw_email=dest["raw_email"], campaign_key=campaign_key)

            # Lock object (database-level) while the message is sent
            record = MessagingRecord.objects.select_for_update().get(
                pk=record.pk)

            # If an email for this campaign was already sent to this user, skip recipient
            if record.sent_at:
                record.save()  # release DB lock
                continue

            records.append(record)

            reply_to = reply_to or settings.EMAIL_REPLY_TO
            email_message = mail.EmailMultiAlternatives(
                subject=subject,
                body=txt_body,
                to=[dest["recipient"]],
                headers=headers,
                reply_to=[reply_to] if reply_to else None,
            )
            email_message.attach_alternative(html_body, "text/html")
            messages.append(email_message)

        connection = None
        try:
            # One shared SMTP connection for the whole batch.
            connection = mail.get_connection()
            connection.open()
            connection.send_messages(messages)

            # Only mark records sent after the batch send succeeded.
            for record in records:
                record.sent_at = timezone.now()
                record.save()
        except Exception as err:
            # Handle exceptions gracefully to avoid breaking the entire task for all teams
            # but make sure they're tracked on Sentry.
            capture_exception(err)
        finally:
            # ensure that connection has been closed
            try:
                connection.close()  # type: ignore
            except Exception:
                # connection may be None or already closed; nothing to do
                pass
def not_found(e):
    """404 handler: report the exception to Sentry, then return a JSON error body."""
    capture_exception(e)
    message = str(e)
    return jsonify(error=message), 404
def handle(self, *args, **options):
    """Long-poll the FxA events SQS queue forever, dispatching known event types to tasks.

    Messages are deleted from the queue once handled, when malformed, or when
    their type is not in FXA_EVENT_TYPES; task-queue errors leave the message
    for redelivery. Exits cleanly on Ctrl-C.

    :raises CommandError: if the SQS credentials or the queue are not configured.
    """
    if not settings.FXA_EVENTS_ACCESS_KEY_ID:
        raise CommandError('AWS SQS Credentials not configured')

    if not settings.FXA_EVENTS_QUEUE_ENABLE:
        raise CommandError('FxA Events Queue is not enabled')

    sqs = boto3.resource('sqs',
                         region_name=settings.FXA_EVENTS_QUEUE_REGION,
                         aws_access_key_id=settings.FXA_EVENTS_ACCESS_KEY_ID,
                         aws_secret_access_key=settings.FXA_EVENTS_SECRET_ACCESS_KEY)
    queue = sqs.Queue(settings.FXA_EVENTS_QUEUE_URL)

    try:
        # Poll for messages indefinitely.
        while True:
            # Heartbeat so monitoring knows the poller is alive.
            self.snitch()
            msgs = queue.receive_messages(WaitTimeSeconds=settings.FXA_EVENTS_QUEUE_WAIT_TIME,
                                          MaxNumberOfMessages=10)
            for msg in msgs:
                if not (msg and msg.body):
                    continue

                statsd.incr('fxa.events.message.received')
                try:
                    # SNS envelope: the FxA event is JSON inside data['Message'].
                    # NOTE(review): a missing 'Message' key raises KeyError, which is
                    # NOT caught by `except ValueError` — confirm upstream guarantees it.
                    data = json.loads(msg.body)
                    event = json.loads(data['Message'])
                except ValueError:
                    # body was not JSON
                    statsd.incr('fxa.events.message.json_error')
                    with sentry_sdk.configure_scope() as scope:
                        scope.set_extra('msg.body', msg.body)
                        sentry_sdk.capture_exception()

                    # Malformed messages can never succeed; drop them.
                    msg.delete()
                    continue

                event_type = event.get('event', '__NONE__').replace(':', '-')
                statsd.incr('fxa.events.message.received.{}'.format(event_type))
                if event_type not in FXA_EVENT_TYPES:
                    statsd.incr('fxa.events.message.received.{}.IGNORED'.format(event_type))
                    # we can safely remove from the queue message types we don't need
                    # this keeps the queue from filling up with old messages
                    msg.delete()
                    continue

                try:
                    FXA_EVENT_TYPES[event_type].delay(event)
                except Exception:
                    # something's wrong with the queue. try again.
                    statsd.incr('fxa.events.message.queue_error')
                    with sentry_sdk.configure_scope() as scope:
                        scope.set_tag('action', 'retried')
                        sentry_sdk.capture_exception()
                    # Deliberately NOT deleting: the message stays visible for retry.
                    continue

                statsd.incr('fxa.events.message.success')
                msg.delete()
    except KeyboardInterrupt:
        sys.exit('\nBuh bye')