def __call__(instance, pk_str):
    # Beanstalk job entry point: look up the stored JobData row by primary
    # key and hand it to the wrapped function.  ``self`` is the enclosing
    # decorator instance (free variable); ``self.cleanup`` controls whether
    # the row is deleted after a successful run.
    try:
        # Make sure we read the latest committed state before fetching.
        flush_transaction()
        pk = int(pk_str)
        data = JobData.objects.get(pk=pk)
    except (TypeError, ValueError):
        # pk_str was not an integer; report to Sentry and drop the job.
        error_msg = "Invalid value for pk"
        error_extra = {
            "pk tried": pk_str
        }
        raven_client.captureMessage(error_msg, extra=error_extra, stack=True)
    except JobData.DoesNotExist:
        error_msg = "Unable to find beanstalk job data."
        error_extra = {
            "pk tried": pk
        }
        raven_client.captureMessage(error_msg, extra=error_extra, stack=True)
    else:
        try:
            val = instance.f(data)
            if self.cleanup:
                # Job succeeded: remove the persisted payload.
                data.delete()
            return val
        except Exception:
            # Never let a job exception kill the worker; report it instead.
            raven_client.captureException()
def save(self, commit=True):
    """Save the VM form.

    On first save, creates the backing container via the conductor API and
    stores the returned UUID; on edit, renames the container when the
    hostname changed.  Returns the saved instance, or None when container
    creation failed (the DB row is rolled back).
    """
    instance = forms.ModelForm.save(self, commit=False)
    hostname = instance.hostname
    if commit:
        # A pk before .save() means we are editing an existing VM.
        editing = bool(instance.pk)
        instance.save()
        self.save_m2m()
        # The owner is tracked separately; keep them out of the users m2m.
        instance.users.remove(instance.owner)
        if not editing:
            ret = call_api("container.create", name=hostname,
                           template=self.cleaned_data.get(
                               "template", "debian"))
            # ret[0] == 1 appears to signal failure — TODO confirm against
            # the conductor API's return-code convention.
            if ret is None or ret[0] == 1:
                client.captureMessage(
                    "Failed to create VM: {}".format(ret))
                instance.delete()
                return None
            else:
                if ret[0] != 2:
                    # Last output line carries the new container's UUID.
                    instance.uuid = uuid.UUID(ret[1].split("\n")[-1])
                    instance.save()
        elif not self.old_hostname == hostname:
            if "name" in self.changed_data:
                ret = call_api("container.set_hostname",
                               name=str(instance.uuid),
                               old_hostname=self.old_hostname,
                               new_hostname=hostname)
                if ret is None or ret != 0:
                    client.captureMessage(
                        "Failed to change VM hostname: {}".format(ret))
                return instance
    return instance
def send_letter(self, template, recipient):
    """
    Sends a letter.

    https://www.stannp.com/direct-mail-api/letters
    """
    # we want to use postal_full_name on envelopes, but the API does not
    # support that. The API expects {title} {first_name} {last_name}, so we
    # put postal_full_name in title and leave the other fields blank.
    payload = {
        'recipient[title]': recipient['postal_full_name'],
        'recipient[address1]': recipient['address_line_1'],
        'recipient[address2]': recipient['address_line_2'],
        'recipient[city]': recipient['locality'],
        'recipient[postcode]': recipient['postal_code'],
        'recipient[country]': recipient['country'],
    }
    # custom_fields = [('field_name', 'field_value')]
    for name, value in recipient['custom_fields']:
        payload['recipient[{}]'.format(name)] = value
    payload['template'] = template
    payload['test'] = self.test_mode
    response = self.post(
        'https://dash.stannp.com/api/v1/letters/create',
        payload,
    )
    if response.status_code != requests.codes.ok:
        client.captureMessage(response.content, stack=True)
    return response
def save_log_info_to_db(sampleID, dynamicReport_jsonFileName, hashValue, stime):
    """Persist a finished DroidBox dynamic-analysis run as a Reports row.

    :param sampleID: pk of the analysed Sample
    :param dynamicReport_jsonFileName: path of the stored JSON report
    :param hashValue: sample hash tuple; hashValue[2] presumably holds the
        sha256 — TODO confirm against the caller
    :param stime: analysis start timestamp (string)
    """
    try:
        etime = str(datetime.datetime.today())
        # Short random password protecting the stored report.
        pwd = ''.join(random.choice(string.ascii_uppercase + string.digits)
                      for x in range(5))
        sampleEntry = Sample.objects.get(id=sampleID)
        # One queue lookup instead of two identical .get() queries.
        queueEntry = Queue.objects.get(sample_id=sampleEntry,
                                       analyzer_type='dynamic')
        analyzerEntry, created = Analyzer.objects.get_or_create(
            name='DroidBox 4.1.1', type='dynamic', os='android',
            tools_integrated='DroidBox 4.1.1 and Evasion',
            machine_id=queueEntry.bot)
        Reports.objects.create(sample_id=sampleEntry,
                               filesystem_position=dynamicReport_jsonFileName,
                               type_of_report='dynamic',
                               analyzer_id=analyzerEntry,
                               os='android',
                               password=pwd,
                               status='done',
                               start_of_analysis=stime,
                               end_of_analysis=etime,
                               public=queueEntry.public)
    except Exception as ex:
        # ``except Exception, ex`` / statement-form ``print`` were
        # Python-2-only syntax; this form works on both 2 and 3.
        print("Error while writing DroidBox report data to database!")
        print(ex)
        client.captureMessage(
            message='Error while writing DroidBox report data to database!',
            level='error',
            extra={'sha256': str(hashValue[2])},
            tags={'file': 'parseDynamicLogFile.py'})
def verify_receipt(receipt_data, user=None): """ Returns the receipt data, or raises a ValidationError. """ #data = json.dumps({'receipt-data': '{' + receipt_data + '}'}) data = '{{\n "receipt-data" : "{}" \n}}'.format(receipt_data) def verify(url): tries = 3 for try_ in range(1, tries + 1): try: req = urllib2.Request(url, data) resp = urllib2.urlopen(req, timeout=18) # app timeout is supposed to be 60 return json.loads(resp.read()) except (urllib2.URLError, socket_error) as e: if try_ == tries: raise e cleaned_data = verify(settings.IAP_VERIFICATION_URL) # See: http://developer.apple.com/library/ios/#technotes/tn2259/_index.html if cleaned_data['status'] == 21007: cleaned_data = verify(settings.IAP_VERIFICATION_SANDBOX_URL) if cleaned_data['status'] != 0: extra = {'status': cleaned_data['status']} if user is not None and user.is_authenticated(): extra['username'] = user.username extra['response_from_apple'] = json.dumps(cleaned_data) client.captureMessage('IAP receipt validation failed', extra=extra) raise ValidationError("Your purchase went through, but there was an error processing it. Please contact support: [email protected]") return cleaned_data['receipt']
def update_video_creative_media():
    """Refresh LiveRail media data for every CreativeVideo.

    LiveRail database errors are not fatal; they are only reported to
    Sentry at INFO level.
    """
    from ui.storage.models import CreativeVideo
    try:
        CreativeVideo.liverail_update_media_all()
    except LiveRailDBError as err:
        client.captureMessage(err.message, level=logging.INFO)
def toggle_data(self, user, source, public):
    """Flip the public-sharing flag for one of the member's data sources,
    and record a one-time activity-feed event when an approved
    direct-sharing project first goes public."""
    recognized = (source in get_source_labels()
                  or source.startswith('direct-sharing-'))
    if not recognized:
        error_msg = ('Public sharing toggle attempted for '
                     'unexpected source "{}"'.format(source))
        django_messages.error(self.request, error_msg)
        if not settings.TESTING:
            raven_client.captureMessage(error_msg)
    participant = user.member.public_data_participant
    access, _ = PublicDataAccess.objects.get_or_create(
        participant=participant, data_source=source)
    access.is_public = (public == 'True')
    access.save()
    if source.startswith('direct-sharing-'):
        match = re.match(r'direct-sharing-(?P<id>\d+)', source)
        if match:
            project = DataRequestProject.objects.get(
                id=int(match.group('id')))
            already_logged = ActivityFeed.objects.filter(
                member=user.member,
                project=project,
                action='publicly-shared').exists()
            if project.approved and not already_logged:
                ActivityFeed(member=user.member,
                             project=project,
                             action='publicly-shared').save()
def load_beanstalk_data(instance, beanstalk_data_str):
    """Parse a JSON beanstalk payload onto *instance*.

    On success sets ``beanstalk_data``, ``jobdata_pk`` and ``attempt``
    (defaulting to 1) and returns True.  Bad JSON or a missing key is
    reported to Sentry and False is returned.
    """
    try:
        payload = json.loads(beanstalk_data_str)
        instance.beanstalk_data = payload
        instance.jobdata_pk = payload["jobdata_pk"]
        instance.attempt = payload.get("attempt", 1)
        return True
    except (TypeError, ValueError):
        raven_client.captureMessage(
            "Unable to load json data.",
            data={
                "culprit": instance.get_sentry_culprit("load_beanstalk_data"),
                "extra": {"data string": beanstalk_data_str},
            },
            stack=True)
    except KeyError as err:
        raven_client.captureMessage(
            "Missing required parameters for retry data beanstalk job.",
            data={
                "culprit": instance.get_sentry_culprit("load_beanstalk_data"),
                "extra": {"error": str(err)},
            },
            stack=True)
    return False
def start_task(user, source, force=False):
    """ Send a task to data-processing.

    Returns 'error' when the request failed or came back non-200 (also
    reporting to Sentry outside of tests); returns None on success.
    """
    task_url = '{}/'.format(
        urlparse.urljoin(settings.DATA_PROCESSING_URL, source))
    # Explicit sentinels instead of the original's fragile
    # ``'task_req' in locals()`` / ``'error_message' in locals()`` checks.
    task_req = None
    error_message = None
    try:
        task_req = requests.post(
            task_url,
            params={'key': settings.PRE_SHARED_KEY},
            json={
                'oh_user_id': user.id,
                'oh_base_url': full_url('/data-import/'),
                'force': force,
            })
    except requests.exceptions.RequestException:
        logger.error('Error in sending request to data processing')
        error_message = 'Error in call to Open Humans Data Processing.'
    if task_req is not None and not task_req.status_code == 200:
        logger.error('Non-200 response from data processing')
        error_message = 'Open Humans Data Processing not returning 200.'
    if error_message is not None:
        if not settings.TESTING:
            client.captureMessage(error_message)
        return 'error'
def update_device_settings(self, new_settings):
    """Apply a ``{slug: value}`` mapping to this device's settings.

    Unknown slugs (no AppSetting exists for the service) are reported to
    Sentry and skipped; known ones are created or updated and saved.
    """
    # .items() instead of the Python-2-only .iteritems() keeps this
    # working on both interpreters.
    for slug, value in new_settings.items():
        setting = self.settings_dict.get(slug)
        if not setting:
            # Keep the try body minimal: only the lookup can raise
            # AppSetting.DoesNotExist.
            try:
                app_setting = AppSetting.objects.get(service=self.service,
                                                     slug=slug)
            except AppSetting.DoesNotExist:
                raven_client.captureMessage(
                    "Invalid setting. Does not exist for this app.",
                    extra={
                        "token": self.token,
                        "invalid_setting": {
                            slug: value,
                        },
                    }
                )
                continue
            setting = DeviceSetting()
            setting.app_setting = app_setting
        setting.device = self
        setting.value = value
        setting.save()
def github_api_request(self, url, method="GET", data=None):
    """Perform an authenticated GitHub API call.

    Returns the decoded JSON body on 200/201, True on 204 (no content),
    or None on 404, timeout, or any other failure.  A 401 clears the
    stored token so the user re-authenticates.
    """
    if data is None:
        # Avoid the shared mutable-default-argument pitfall.
        data = {}
    try:
        resp = requests.request(
            url="https://api.github.com{}".format(url),
            headers={
                "Authorization": "token {}".format(self.github_token)
            },
            method=method, json=data, timeout=10)
    except requests.ConnectTimeout:
        client.captureException()
        return None
    if resp.status_code == 204:
        return True
    if resp.status_code == 404:
        return None
    if resp.status_code not in (200, 201):
        if resp.status_code == 401:
            # Token rejected: drop it so a fresh one is requested.
            self.github_token = ""
            self.save()
        else:
            client.captureMessage(
                "GitHub API Request Failure: {} {}\n{} {}\n".format(
                    resp.status_code, resp.text, method, data))
        return None
    return json.loads(resp.text)
def is_on_local_network(self):
    """Return True when self.ip is a private address treated as local
    (192.168.0.0/16 or 172.16.0.0/12), logging the granted access.

    Invalid IP strings are reported to Sentry and return False.
    """
    try:
        octets = [int(part) for part in self.ip.split('.')]
    except (AttributeError, ValueError):
        # Narrowed from a bare ``except:``; only parse failures belong here.
        logger.warning('Invalid IP address error - %s' % self.ip)
        client.captureMessage("Invalid IP address error - %s" % self.ip)
        return False
    if len(octets) != 4:
        logger.warning('Invalid IP address error - %s' % self.ip)
        client.captureMessage("Invalid IP address error - %s" % self.ip)
        return False
    # Check if the user is on the local network. If they are, log it.
    # Comparing whole octets fixes the old digit-concatenation check
    # (int of the first five digits), which misclassified addresses such
    # as 172.1.61.x as being in 172.16.0.0 -- 172.31.255.255.
    if (octets[0] == 192 and octets[1] == 168) or \
            (octets[0] == 172 and 16 <= octets[1] <= 31):
        self.create_log(
            _account_const.IP_CHECK_LOCAL,
            {'result': 'Access Granted: User is on local network.'}
        )
        logger.info('User is on local IP address - %s' % self.ip)
        return True
    return False
def send_appnexus_metrics_to_graphite():
    """Push AppNexus report metrics into graphite.

    Timeouts are expected occasionally and are only noted in Sentry at
    INFO level.
    """
    from stats.appnexus.tasks import populate_graphite
    try:
        populate_graphite()
    except requests.Timeout as err:
        client.captureMessage('Appnexus reports timeouted: {0}'.format(err),
                              level=logging.INFO)
def call_api(action=None, **kwargs):
    """Call the conductor agent.

    POSTs ``{"method": action, "args": kwargs}`` when *action* is given,
    otherwise issues a plain GET.  Returns the decoded JSON body, or None
    on connection errors, timeouts, or 400/500 responses (which are
    reported to Sentry).
    """
    if settings.CONDUCTOR_CERT_PATH:
        auth_args = {"cert": settings.CONDUCTOR_CERT_PATH, "verify": False}
    else:
        auth_args = {}
    try:
        resp = requests.request(
            method="POST" if action else "GET",
            json={"method": action, "args": kwargs},
            url=settings.CONDUCTOR_AGENT_PATH,
            timeout=30,
            **auth_args)
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ReadTimeout):
        client.captureException()
        return None
    if resp.status_code in (500, 400):
        client.captureMessage(
            "Conductor API Request Failure: {} {}\n{} {}\n".format(
                resp.status_code, resp.text, action, kwargs))
        return None
    return json.loads(resp.text)
def dump_database_view(request, site_id):
    """POST endpoint that streams a SQL dump of the site's database as a
    file download.

    Access: superusers or members of the site's group.  Failures are
    surfaced to the user and reported to Sentry.
    """
    site = get_object_or_404(Site, id=site_id)
    if not request.user.is_superuser and not site.group.users.filter(id=request.user.id).exists():
        raise PermissionDenied
    if request.method != "POST":
        return redirect("backup_database", site_id=site.id)
    if not site.database:
        messages.error(request, "No database provisioned!")
        return redirect("info_site", site_id=site.id)
    if site.database.category == "postgresql":
        ret, out, err = run_as_site(site, ["pg_dump", str(site.database)], timeout=60)
    elif site.database.category == "mysql":
        ret, out, err = run_as_site(
            site,
            ["mysqldump", "-u", site.database.username,
             "--password={}".format(site.database.password),
             "-h", settings.MYSQL_DB_HOST, site.database.db_name],
            timeout=60)
    else:
        # Previously an unknown category fell through to a NameError on
        # ``ret``; fail cleanly instead.
        messages.error(request, "Failed to export database!")
        return redirect("info_site", site_id=site.id)
    if ret == 0:
        resp = HttpResponse(out, content_type="application/force-download")
        resp["Content-Disposition"] = "attachment; filename=dump_{}_{}.sql".format(
            site.name, datetime.datetime.now().strftime("%m%d%Y"))
        return resp
    else:
        messages.error(request, "Failed to export database!")
        client.captureMessage("Database export failed, ({}) - {} - {}".format(ret, out, err))
    return redirect("info_site", site_id=site.id)
def _write_message_with_apnsclient(self, message, devices):
    # Send an APNs push to *devices*, disabling devices whose tokens are
    # rejected, and recursing once per retry batch until nothing is left
    # to retry.  Python 2 module (statement-form ``print``).
    # start with all devices in "complete" list. remove as necessary.
    # convert to list: attempting to avoid deadlock in "set_devices_last_notified_at"
    complete_devices = list(devices[:])
    fail_devices = []
    retry_devices = []
    con = self.session.get_connection(address=(self.hostname, 2195),
                                      cert_string=self.certificate,
                                      key_string=self.private_key)
    srv = APNs(con)
    res = srv.send(message)
    # Check failures. Check codes in APNs reference docs.
    for token, reason in res.failed.items():
        code, errmsg = reason
        # Log with sentry
        raven_client.captureMessage(
            "APNs Failure - Reason:%s - Device:%s" % (errmsg, token)
        )
        # Disable device
        for device in devices:
            if device.token == token:
                complete_devices.remove(device)
                device.is_active = False
                device.save()
        # NOTE(review): "faled" is a typo in the log output.
        print "Device faled: {0}, reason: {1}".format(token, errmsg)
    # Check failures not related to devices.
    for code, errmsg in res.errors:
        # Log with sentry
        raven_client.captureMessage("APNs Failure - Error:%s" % errmsg)
        print "Error: ", errmsg
    # Check if there are tokens that can be retried
    if res.needs_retry():
        # repeat with retry_message
        retry_message = res.retry()
        # add retried devices to "retry_devices"
        for token in retry_message.tokens:
            for device in complete_devices:
                if device.token == token:
                    retry_devices.append(device)
        # remove retried devices from "complete_devices"
        for device in retry_devices:
            complete_devices.remove(device)
        # retry message (recursive until the retry set is empty)
        self._write_message_with_apnsclient(retry_message, retry_devices)
    # set date of last message for "complete_devices"
    self.set_devices_last_notified_at(complete_devices)
def get(self, request, *args, **kwargs):
    """Diagnostic view: deliberately raise ZeroDivisionError to verify
    that Sentry reporting works end-to-end."""
    try:
        1/0
    except Exception:
        # ``except Exception, e`` was Python-2-only syntax, and the bound
        # exception was never used — drop the binding.
        from raven.contrib.django.raven_compat.models import client
        # Capture an Error
        client.captureException()
        # Reporting an Event
        client.captureMessage('This is a message sent from medusaserver by client.captureMessage()')
def generate_salaries_using_STATScom_Projections(self, request, queryset):
    """
    admin action to generate salaries for the selected pool based on
    stats.com fantasy projections

    :param request:
    :param queryset:
    :return:
    """
    logger.info(
        'action: salary.admin.generate_salaries_using_STATScom_Projections'
    )
    if len(queryset) > 1:
        # logger.warn() is deprecated in favour of logger.warning().
        logger.warning(
            'You must select only one pool to generate salaries for at a time.'
        )
        self.message_user(
            request,
            'You must select only one pool to generate salaries for at a time.'
        )
        return
    # should be a list of 1 item.
    for pool in queryset:
        sport = pool.site_sport.name
        # Dispatch table replaces the duplicated if/elif chain; use
        # STATS.com fantasy projections api as the basis for draftboard
        # player salaries.
        sport_tasks = {
            'nfl': generate_salaries_from_statscom_projections_nfl,
            'nba': generate_salaries_from_statscom_projections_nba,
            'mlb': generate_salaries_from_statscom_projections_mlb,
        }
        task = sport_tasks.get(sport)
        if task is None:
            msg = '[%s] is unimplemented server-side. DID NOT GENERATE SALARIES for %s!' % (
                sport, sport)
            logger.error(msg)
            client.captureMessage(msg)
            self.message_user(request, msg)
            return
        logger.info('Queing %s stats projection task.' % sport.upper())
        task_result = task.delay()
        # get() is blocking and waits for task to finish
        task_result.get()
        logger.info(
            'stats projection task has finished, returning to client.')
        # task finished. presumably salaries have been updated based on
        # latest projections
        messages.success(request, 'updated salaries')
def handle_response(self):
    # Post-process a GIDX API response: log it, persist a GidxSession
    # audit row, escalate service-level failures (ResponseCode >= 500) to
    # Sentry, and raise ValidationError for any non-zero ResponseCode.
    # Log out the req + res
    logger.info({
        'url': self.url,
        "action": self.action_name,
        "request": self.params,
        "response": self.response_wrapper.json,
    })
    # Either field may carry the client IP depending on the request type.
    ip_address = self.params.get('DeviceIpAddress') or self.params.get(
        'CustomerIpAddress')
    # Save our session info
    GidxSession.objects.create(
        user=self.user,
        gidx_customer_id=self.params.get('MerchantCustomerID'),
        session_id=self.params.get('MerchantSessionID'),
        service_type=self.service_type,
        device_location=ip_address,
        request_data=strip_sensitive_fields(self.params),
        response_data=self.response_wrapper.json,
        reason_codes=self.response_wrapper.json.get('ReasonCodes', None),
    )
    # 500+ means some kind of service-level error. make sure we get notified about these.
    if self.response_wrapper.json['ResponseCode'] >= 500:
        logger.error({
            'url': self.url,
            "action": "%s%s" % (self.action_name, '--FAIL'),
            "request": self.params,
            "response": self.response_wrapper.response.text,
        })
        # Send some useful information to Sentry.
        client.context.merge({
            'extra': {
                'response_json': self.response_wrapper.json,
                'response_text': self.response_wrapper.response.text,
                'params': self.params,
                'url': self.url,
            }
        })
        client.captureMessage(
            "GIDX request failed - %s"
            % self.response_wrapper.json['ResponseMessage'])
        client.context.clear()
        raise ValidationError(
            detail='%s' % self.response_wrapper.json['ResponseMessage'])
    # a ResponseCode of 0 indicates no errors. If we had errors, raise an exception that can
    # be caught on the view layer.
    if not self.response_wrapper.json['ResponseCode'] == 0:
        logger.warning(self.response_wrapper.json)
        raise ValidationError(
            detail='%s' % self.response_wrapper.json['ResponseMessage'])
def _message(msg: str, log_level: int, extra: Dict = None) -> None:
    """Route *msg* to the local logger in development, or to Sentry
    (with a stack trace and optional *extra* context) otherwise."""
    if GeneralConfig.is_dev():
        logger.log(log_level, msg)
    else:
        client.captureMessage(
            message=msg,
            level=log_level,
            stack=True,
            extra=extra,
        )
def send_update_error_to_sentry(user, api_response):
    """Report a failed company-profile update to Sentry.

    The context is cleared first so request POST data (e.g. a binary
    image, which previously made Sentry fail at sending) is excluded.
    """
    sentry_client.context.clear()
    sentry_client.user_context({
        'hashed_uuid': user.hashed_uuid,
        'user_email': user.email,
    })
    sentry_client.captureMessage(
        message='Updating company profile failed',
        data={},
        extra={'api_response': str(api_response.content)},
    )
def send_userprofile_to_cis(instance_id=None, profile_results=[], **kwargs):
    # Publish user-profile payloads to Mozilla CIS using a boto3 session
    # backed by assumed-role STS credentials.  Returns the list of
    # per-profile publish results ([] when skipped).
    # NOTE(review): mutable default ``profile_results=[]`` — never mutated
    # here, but still a risky pattern.
    import boto3
    from cis.publisher import ChangeDelegate
    # Skip entirely in tests or when DinoPark owns the pipeline.
    if is_test_environment() or settings.DINO_PARK_ACTIVE:
        return []
    if not instance_id and not profile_results:
        return []
    if instance_id:
        profile_results = bundle_profile_data(instance_id)
    sts = boto3.client('sts')
    sts_response = sts.assume_role(
        RoleArn=settings.CIS_IAM_ROLE_ARN,
        RoleSessionName=settings.CIS_IAM_ROLE_SESSION_NAME
    )
    # Session built from the temporary assumed-role credentials.
    session = boto3.session.Session(
        aws_access_key_id=sts_response['Credentials']['AccessKeyId'],
        aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],
        aws_session_token=sts_response['Credentials']['SessionToken'],
        region_name=settings.CIS_AWS_REGION
    )
    publisher = {
        'id': settings.CIS_PUBLISHER_NAME
    }
    results = []
    for data in profile_results:
        # Send data to sentry for debugging purposes
        cis_change = ChangeDelegate(publisher, {}, data)
        cis_change.boto_session = session
        result = cis_change.send()
        results.append(result)
        log_name = 'CIS transaction - {}'.format(data['user_id'])
        log_data = {
            'level': logging.DEBUG,
            'logger': 'mozillians.cis_transaction'
        }
        log_extra = {
            'cis_transaction_data': json.dumps(data),
            'cis_transaction_groups': json.dumps(data['groups']),
            'cis_transaction_result': result
        }
        sentry_client.captureMessage(log_name, data=log_data, stack=True,
                                     extra=log_extra)
    return results
def send_userprofile_to_cis(instance_id=None, profile_results=[], **kwargs):
    # Push profile payloads to Mozilla CIS with assumed-role AWS
    # credentials; each publish result is also logged to Sentry.
    # Returns the list of results ([] when skipped).
    import boto3
    from cis.publisher import ChangeDelegate
    # No-op during tests or when DinoPark handles profile publishing.
    if is_test_environment() or settings.DINO_PARK_ACTIVE:
        return []
    if not instance_id and not profile_results:
        return []
    if instance_id:
        profile_results = bundle_profile_data(instance_id)
    sts = boto3.client('sts')
    sts_response = sts.assume_role(
        RoleArn=settings.CIS_IAM_ROLE_ARN,
        RoleSessionName=settings.CIS_IAM_ROLE_SESSION_NAME)
    # Temporary-credential session scoped to the CIS role.
    session = boto3.session.Session(
        aws_access_key_id=sts_response['Credentials']['AccessKeyId'],
        aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],
        aws_session_token=sts_response['Credentials']['SessionToken'],
        region_name=settings.CIS_AWS_REGION)
    publisher = {'id': settings.CIS_PUBLISHER_NAME}
    results = []
    for data in profile_results:
        # Send data to sentry for debugging purposes
        cis_change = ChangeDelegate(publisher, {}, data)
        cis_change.boto_session = session
        result = cis_change.send()
        results.append(result)
        log_name = 'CIS transaction - {}'.format(data['user_id'])
        log_data = {
            'level': logging.DEBUG,
            'logger': 'mozillians.cis_transaction'
        }
        log_extra = {
            'cis_transaction_data': json.dumps(data),
            'cis_transaction_groups': json.dumps(data['groups']),
            'cis_transaction_result': result
        }
        sentry_client.captureMessage(log_name, data=log_data, stack=True,
                                     extra=log_extra)
    return results
def register_certificate (self, ibs_client, batch): errors = [] # Start from the default scenario status = False try: raven_client.context.activate () response = self._register_certificate_batch (ibs_client, batch) logger.info ('Certificate batch response', extra={ 'response': response } ) status = response['CreateTicketsResult'] if status is False: if 'a_sMessage' in response: try: message = ElementTree.fromstring (response['a_sMessage']) except ElementTree.ParseError: errors.append (response['a_sMessage']) else: for child in message: if child.tag == 'Error': errors.append (child.find ('ErrorMessage').text) raven_client.context.merge ({ 'extra': { 'response': response, } }) raven_client.captureMessage ('Certificate creation failed.') raven_client.context.clear () except Exception as exc: logger.exception ( 'Register certificate exception' ) status = False errors = ['An unknown error occured during certificate registration.'] if settings.DEBUG: print(exc) raven_client.captureException () return (status, errors)
def load_database_view(request, site_id):
    """POST endpoint that imports an uploaded .sql file into the site's
    database, running the DB client demoted to the site's own user.

    Access: superusers or members of the site's group.  Failures are
    surfaced to the user and reported to Sentry.
    """
    site = get_object_or_404(Site, id=site_id)
    if not request.user.is_superuser and not site.group.users.filter(
            id=request.user.id).exists():
        raise PermissionDenied
    if request.method != "POST":
        return redirect("backup_database", site_id=site.id)
    if not site.database:
        messages.error(request, "No database provisioned!")
        return redirect("info_site", site_id=site.id)
    sql_file = request.FILES.get("file", None)
    if not sql_file:
        messages.error(request, "You must upload a .sql file!")
        return redirect("backup_database", site_id=site.id)
    if site.database.category == "postgresql":
        cmd = ["psql", str(site.database)]
    elif site.database.category == "mysql":
        cmd = [
            "mysql", "-u", site.database.username,
            "--password={}".format(site.database.password),
            "-h", settings.MYSQL_DB_HOST, site.database.db_name
        ]
    else:
        # Previously an unknown category fell through to a NameError on
        # ``proc``; fail cleanly instead.
        messages.error(request, "Database import failed!")
        return redirect("info_site", site_id=site.id)
    proc = Popen(cmd,
                 preexec_fn=demote(site.user.id, site.group.id),
                 cwd=site.path,
                 stdin=PIPE,
                 stdout=PIPE,
                 stderr=PIPE)
    for chunk in sql_file.chunks():
        proc.stdin.write(chunk)
    # communicate() closes stdin and drains stdout/stderr safely.
    out, err = proc.communicate()
    if proc.returncode == 0:
        messages.success(request, "Database import completed!")
    else:
        messages.error(request, "Database import failed!")
        client.captureMessage("Database import failed, ({}) - {} - {}".format(
            proc.returncode, out.decode("utf-8"), err.decode("utf-8")))
    return redirect("info_site", site_id=site.id)
def toggle_data(self, user, source, public):
    """Flip the public-sharing flag on one of the member's data sources,
    warning (and reporting to Sentry outside tests) on unknown sources."""
    recognized = (source in get_source_labels()
                  or source.startswith('direct-sharing-'))
    if not recognized:
        error_msg = ('Public sharing toggle attempted for '
                     'unexpected source "{}"'.format(source))
        django_messages.error(self.request, error_msg)
        if not settings.TESTING:
            raven_client.captureMessage(error_msg)
    participant = user.member.public_data_participant
    access, _ = PublicDataAccess.objects.get_or_create(
        participant=participant, data_source=source)
    access.is_public = (public == 'True')
    access.save()
def api_request(self, url, params=None, refresh=True):
    """GET an Ion API endpoint using the user's OAuth access token.

    On a 401, the token is refreshed once and the request retried; a
    second 401 is reported to Sentry.  Returns the decoded JSON response.
    """
    s = self.get_social_auth()
    # Build a fresh query dict instead of mutating ``params`` — the old
    # ``params={}`` default was a shared mutable that accumulated the
    # access token across calls (and clobbered caller-owned dicts).
    query = dict(params) if params else {}
    query.update({"format": "json"})
    query.update({"access_token": s.access_token})
    r = requests.get("https://ion.tjhsst.edu/api/{}".format(url), params=query)
    if r.status_code == 401:
        if refresh:
            try:
                self.get_social_auth().refresh_token(load_strategy())
            except Exception:
                client.captureException()
            # Retry exactly once with the (possibly) refreshed token.
            return self.api_request(url, params, False)
        else:
            client.captureMessage("Ion API Request Failure: {} {}".format(r.status_code, r.json()))
    return r.json()
def root_exec(cmd):
    """Run *cmd* (shell string or argv list) and report success.

    Returns True when the command exits 0; otherwise logs the failure
    (with captured stdout/stderr) to Sentry and returns False.
    """
    p = Popen(shlex.split(cmd) if isinstance(cmd, str) else cmd,
              stdout=PIPE, stderr=PIPE)
    # communicate() drains both pipes concurrently, avoiding the deadlock
    # the old sequential .read()/.read()/.wait() sequence could hit when
    # one pipe buffer filled while the other was being read.
    output, error = p.communicate()
    if p.returncode == 0:
        return True
    client.captureMessage("Failed to execute command: {}".format(cmd),
                          extra={
                              "stdout": output,
                              "stderr": error
                          })
    return False
def logging_exception_handler(exc, context):
    """DRF exception handler that also forwards every API failure to
    Sentry when ``RAVEN_LOG_API_ERRORS`` is enabled."""
    response = exception_handler(exc, context)
    # Log to Sentry
    if getattr(settings, 'RAVEN_LOG_API_ERRORS', False):
        raven_client.context.activate()
        raven_client.context.merge(
            {'extra': {'exc': exc, 'context': context}})
        raven_client.captureMessage('API failure')
        raven_client.context.clear()
    return response
def handle_response(self):
    # Validate a GIDX registration-status response: log it, escalate
    # service-level failures (ResponseCode >= 500) to Sentry, and raise
    # ValidationError for any non-zero ResponseCode.
    # Log out the req + res
    logger.info({
        'url': self.url,
        "action": self.action_name,
        "request": self.params,
        "response": self.response_wrapper.json,
    })
    # 500+ means some kind of service-level error. make sure we get notified about these.
    if self.response_wrapper.json['ResponseCode'] >= 500:
        logger.error({
            'url': self.url,
            "action": "%s%s" % (self.action_name, '--FAIL'),
            "request": self.params,
            "response": '%s - %s' % (self.response_wrapper,
                                     self.response_wrapper.response.text),
        })
        # Send some useful information to Sentry.
        client.context.merge({
            'extra': {
                'response_json': self.response_wrapper.json,
                'response_text': self.response_wrapper.response.text,
                'params': self.params,
                'url': self.url,
            }
        })
        client.captureMessage(
            "GIDX request failed - REGISTRATION_STATUS_REQUEST")
        client.context.clear()
        raise ValidationError(
            detail='%s' % self.response_wrapper.json['RegistrationStatusMessage'])
    # a ResponseCode of 0 indicates no errors. If we had errors, raise an exception that can
    # be caught on the view layer.
    if not self.response_wrapper.json['ResponseCode'] == 0:
        logger.warning(self.response_wrapper.json)
        raise ValidationError(
            detail='%s' % self.response_wrapper.json['RegistrationStatusMessage'])
def check_for_vpn(self, flag="m", subdomain=settings.GETIPNET_SUBDOMAIN):
    """Query getipintel.net for the VPN/proxy risk of self.ip.

    Returns (result, msg): result True means access allowed.  On a
    rate-limit (429) or any other service failure we fail open (True)
    and report the unexpected response to Sentry once — the previous
    version duplicated that reporting block verbatim in both branches.
    """
    url = 'https://%s.getipintel.net/check.php?ip=%s&contact=%s&flags=%s'
    response = requests.get(
        url % (subdomain, self.ip, settings.GETIPNET_CONTACT, flag)
    )
    if response.status_code == 200:
        value = float(response.content)
        result = value < settings.GETIPNET_NORMAL
        msg = '' if result else MODAL_MESSAGES['VPN']['message']
        # Log all failed attempts.
        if result is False:
            logger.info('check_for_vpn Failed. risk value: %s' % value)
            self.create_log(
                _account_const.IP_CHECK_STATUS,
                {'result': 'Access Denied, Risk value: %s' % value}
            )
        else:
            logger.info('check_for_vpn Passed. risk value: %s' % value)
        return result, msg
    # Non-200: rate limited (429) or the service is not working.
    if response.status_code == 429:
        logger.error("getipintel.net 429 response: %s" % response.reason)
    else:
        logger.error("Unexpected getipintel.net response: %s" % response.reason)
    client.context.merge({'extra': {
        'reason': response.reason,
        'status_code': response.status_code,
        'ip': self.ip,
        'user': self.user.username,
    }})
    client.captureMessage("Unexpected getipintel.net response: %s" % str(response))
    client.context.clear()
    # 429 returns the reason text; other failures return an empty message.
    return True, response.reason if response.status_code == 429 else ''
def tryHard(url):
    """Repeatedly GET *url* until a 200 response arrives.

    Gives up after ~6 attempts (reporting to Sentry) and returns None;
    otherwise returns the successful response.  Sleeps 1s between
    attempts that produced no response at all.
    """
    data = None
    counter = 0
    while not data or data.status_code != 200:
        try:
            data = requests.get(url)
        except requests.exceptions.RequestException:
            # Narrowed from a bare ``except:`` that also swallowed
            # KeyboardInterrupt/SystemExit; network errors simply retry.
            pass
        if counter > 5:
            client.captureMessage(url + ' je zahinavu več ko 2x.')
            print(url + ' je zahinavu več ko 2x.')
            return None
        counter += 1
        if not data:
            time.sleep(1)
            print("sleep")
    return data
def generate_ssl_certificate(domain, renew=False):
    """Generate SSL certs for a domain and update the nginx config."""
    proc = Popen([
        "/usr/bin/certbot", "certonly", "--webroot",
        "-w", settings.LE_WEBROOT,
        "-d", domain.domain, "-n"
    ])
    if proc.wait() != 0:
        client.captureMessage(
            "Failed to generate SSL certificate for domain {} on site {}".
            format(domain.domain, domain.site.name))
    elif not renew:
        # First issuance: wire up nginx for the new certificate.
        create_config_files(domain.site)
        reload_services()
def __call__(instance, arg):
    # Beanstalk job wrapper with retry support.  ``self`` is the enclosing
    # decorator instance (free variable) carrying max_retries, warn_after,
    # priority and ttr configuration.
    try:
        data = json.loads(arg)
        # Pop the bookkeeping key so the wrapped function never sees it.
        attempt = int(data.pop(u'__attempt', 0))
    except (TypeError, ValueError) as e:
        # Not JSON: legacy/raw payload — pass the string straight through.
        return instance.f(arg)
    try:
        return instance.f(data)
    except BeanstalkRetryError as e:
        # Resolve the queue/job name; fall back when the setting is absent.
        try:
            job = settings.BEANSTALK_JOB_NAME % {
                u'app': instance.app,
                u'job': instance.__name__,
            }
        except AttributeError:
            job = u"{}.{}".format(instance.app, instance.__name__)
        if attempt < self.max_retries:
            # Optional early warning shortly before retries are exhausted.
            if self.warn_after is not None and attempt == (self.warn_after - 1):
                msg = u"Approaching max retry attempts for {}.".format(job)
                warn_data = {
                    'extra': {
                        'Job': job,
                        'Attempt number': attempt,
                        'Warn after': self.warn_after,
                        'Max retries': self.max_retries,
                        'Job data': data,
                    }
                }
                raven_client.captureMessage(msg, data=warn_data, stack=True,
                                            level=logging.WARN)
            data[u'__attempt'] = attempt + 1
            # Requeue with exponential backoff: 2**attempt seconds delay.
            beanstalk_client = BeanstalkClient()
            beanstalk_client.call(job, json.dumps(data),
                                  delay=(2 ** attempt),
                                  priority=self.priority, ttr=self.ttr)
        else:
            msg = u"Exceeded max retry attempts for {}.".format(job)
            error_data = e.data if e.data is not None else {}
            raven_client.captureMessage(msg, data=error_data, stack=True)
            # Optionally notify a human when the error requests it.
            if e.should_email:
                send_mail(e.email_subject, e.email_body,
                          settings.DEFAULT_FROM_EMAIL, [e.email_address],
                          fail_silently=False)
    except Exception as e:
        # Any other failure: report and swallow so the worker survives.
        raven_client.captureException()
def submit_scholarship(req):
    """GET: render the submission page.  POST: store a submitted
    scholarship link, track it in Mixpanel, and note it in Sentry."""
    if req.method == 'GET':
        context = {
            'page_title': 'About Scholar Hippo'
        }
        return render_to_response('submit_scholarship.html', context)
    elif req.method == 'POST':
        payload = json.loads(req.body)
        submitted_scholarship = SubmittedLink(title=payload['title'],
                                              third_party_url=payload['url'])
        if 'email' in payload:
            submitted_scholarship.email = payload['email']
        submitted_scholarship.save()
        mp = Mixpanel('2871f3b0cb686b7f9fff1ba66d042817')
        mp.track(0, 'submission', {
            'title': payload['title']
        })
        # raven's captureMessage does not accept arbitrary keyword
        # arguments (``title=...`` raised/ was dropped); pass context
        # through ``extra`` instead.
        client.captureMessage("New scholarship submitted.",
                              extra={'title': payload['title']})
        return HttpResponse(json.dumps({'msg': 'thanks dawg'}))
def sns_handler(request):
    """AWS SNS webhook: confirms new subscriptions and logs bounce
    notifications to Sentry.  Always answers HTTP 200."""
    if verify_sns_notification(request):
        body = json.loads(request.body.decode())
        kind = request.META.get('HTTP_X_AMZ_SNS_MESSAGE_TYPE', None)
        if kind == SNS_MESSAGE_TYPE_SUB_NOTIFICATION:
            # Fetching the URL is how SNS subscriptions are confirmed.
            urlopen(body['SubscribeURL'])
        elif kind == SNS_MESSAGE_TYPE_NOTIFICATION:
            raven_client.context.activate()
            raven_client.context.merge({
                'extra': {
                    'notification': body,
                }
            })
            raven_client.captureMessage('Bounce notification')
            raven_client.context.clear()
    return HttpResponse(status=200)
def _execute_search_query(query):
    """Execute an elasticsearch-dsl query with the configured timeout.

    Queries slower than the warning threshold are logged and reported to
    Sentry together with the query body.  Returns the ES response.
    """
    response = query.params(
        request_timeout=settings.ES_SEARCH_REQUEST_TIMEOUT).execute()
    threshold_ms = settings.ES_SEARCH_REQUEST_WARNING_THRESHOLD * 1000
    if response.took >= threshold_ms:
        logger.warning(
            f'Elasticsearch query took a long time ({response.took/1000:.2f} seconds)'
        )
        client.captureMessage(
            'Elasticsearch query took a long time',
            extra={
                'query': query.to_dict(),
                'took': response.took,
                'timed_out': response.timed_out,
            },
        )
    return response
def handle_response(self):
    # Post-process a GIDX session-creation response: log it, persist a
    # GidxSession audit row, escalate service-level failures
    # (ResponseCode >= 500) to Sentry and raise ValidationError.
    # NOTE(review): unlike the sibling handler, a non-zero, sub-500
    # ResponseCode is NOT rejected here — confirm that is intentional.
    # Log out the req + res
    logger.info({
        'url': self.url,
        "action": self.action_name,
        "request": self.params,
        "response": self.response_wrapper.json,
    })
    # Save our session info
    GidxSession.objects.create(
        user=self.user,
        gidx_customer_id=get_customer_id_for_user(self.user),
        session_id=self.response_wrapper.json['MerchantSessionID'],
        service_type=self.service_type,
        request_data=strip_sensitive_fields(self.params),
        response_data=self.response_wrapper.json,
    )
    # 500+ means some kind of service-level error. make sure we get notified about these.
    if self.response_wrapper.json['ResponseCode'] >= 500:
        logger.error({
            'url': self.url,
            "action": "%s%s" % (self.action_name, '--FAIL'),
            "request": self.params,
            "response": self.response_wrapper.response.text,
        })
        # Send some useful information to Sentry.
        client.context.merge({
            'extra': {
                'response_json': self.response_wrapper.json,
                'response_text': self.response_wrapper.response.text,
                'params': self.params,
                'url': self.url,
            }
        })
        client.captureMessage(
            "GIDX request failed - %s"
            % self.response_wrapper.json['ResponseMessage'])
        client.context.clear()
        raise ValidationError(
            detail='%s' % self.response_wrapper.json['ResponseMessage'])
def receive(self, text_data=None, bytes_data=None):
    """Relay a janus signaling message to the printer.

    Shared printer connections must not open a data channel, so an SDP
    answer that bundles anything beyond video is rejected and reported.
    """
    # we are going to disable datachannel for shared printer connections
    # by tampering janus offer/answer messages
    message = json.loads(text_data)
    jsep = message.get('jsep')
    if jsep is not None and jsep['type'] == 'answer':
        sdp = jsep['sdp']
        if 'BUNDLE video\r\n' not in sdp:
            # frontend should request only video; if that's not the case,
            # then something went wrong with patching the offer (below)
            sentryClient.captureMessage(
                'bad sdp bundle',
                extra={'sdp': sdp}
            )
            return
    channels.send_msg_to_printer(self.printer.id, {'janus': text_data})
def save_report(req):
    """Persist a scholarship problem report submitted as a JSON POST body.

    Expects ``problem``, ``explanation`` and an encrypted scholarship key
    ``sk`` in the body. Returns a small JSON acknowledgement, or a plain
    error message for non-POST requests.
    """
    if req.method != 'POST':
        return HttpResponse('ya gotta send a post request')

    form = json.loads(req.body)
    problem = form['problem']
    explanation = form['explanation']

    report = Report()
    report.problem = problem
    report.explanation = explanation
    report.ip_address = get_client_ip(req)

    scholarship_id = decrypt_sk(form['sk'])
    scholarship = Scholarship.objects.get(id=scholarship_id)
    report.scholarship = scholarship
    report.save()

    # tell sentry we got a report
    # BUG FIX: raven's captureMessage() does not accept arbitrary keyword
    # arguments like problem=/explanation= — they were silently discarded,
    # so the report details never reached Sentry. Pass them via extra=
    # (consistent with the other captureMessage calls in this codebase).
    client.captureMessage("Scholarship problem reported.",
                          extra={'problem': problem,
                                 'explanation': explanation})

    return HttpResponse('{"msg": "thanks"}', content_type='application/json')
def handle_missing_data(instance):
    """Re-queue the beanstalk job with exponential backoff, or report to
    Sentry once the retry budget is spent.

    NOTE(review): this is a nested function — ``self`` (providing
    ``max_retries`` and ``ttr``) comes from the enclosing scope.
    """
    job_name = instance.get_job_name()

    if instance.attempt >= self.max_retries:
        # Out of retries: report the failure with full job context.
        error_data = {
            "culprit": instance.get_sentry_culprit("handle_missing_data"),
            "extra": {
                "Job name": job_name,
                "Attempt number": instance.attempt,
                "Beanstalk Data": instance.beanstalk_data,
            }
        }
        raven_client.captureMessage(
            u"Exceeded max retry attempts for {}.".format(job_name),
            data=error_data, stack=True)
    else:
        # Bump the attempt counter and re-queue, delaying 2**attempt seconds.
        instance.beanstalk_data['attempt'] = instance.attempt + 1
        BeanstalkClient().call(job_name,
                               json.dumps(instance.beanstalk_data),
                               delay=2 ** instance.attempt,
                               ttr=self.ttr)
def save_log_data_to_json(log_data, workingDir):
    """Write each report category in ``log_data`` to a timestamped JSON file.

    :param log_data: mapping of report category -> JSON-serializable data
    :param workingDir: directory in which the report files are created
    :returns: dict mapping each successfully written category to its file path
    """
    saved_files = {}
    for report_category in log_data:
        try:
            if not os.path.exists(workingDir):
                os.mkdir(workingDir)
            prefix = REPORT_FILE_PREFIXES[report_category]
            # Take a single timestamp so the date and time components of the
            # file name cannot straddle a second/day boundary (the old code
            # called datetime.today() twice). Format matches the original:
            # <prefix>_YYYY-MM-DD_HH-MM-SS.json
            timestamp = datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S')
            # os.path.join instead of bare string concatenation, which broke
            # when workingDir had no trailing separator.
            json_file_name = os.path.join(
                workingDir, "{}_{}.json".format(prefix, timestamp))
            # ``with`` guarantees the handle is closed even if the write
            # raises (the old code leaked the file object on error).
            with open(json_file_name, "a+") as json_file:
                json_file.write(json.dumps(log_data[report_category]))
            saved_files[report_category] = json_file_name
        except Exception as ex:  # "except Exception, ex" was Python-2-only syntax
            print(ex)
            print("Error while writing DroidBox output to JSON!")
            # NOTE(review): ``hashValue`` is not defined in this function —
            # presumably a module-level global; confirm it exists at runtime.
            client.captureMessage(message='Error while writing DroidBox output to JSON!',
                                  level='error',
                                  extra={'sha256': str(hashValue[2])},
                                  tags={'file': 'parseDynamicLogFile.py'})
    # Previously built but never returned; returning it is backward-compatible
    # (callers ignoring the return value are unaffected).
    return saved_files
def process_response(self, request, response):
    """Append an access-log record for every response and report requests
    slower than one second to Sentry."""
    with log_exception_ctx():
        # Prefer the proxy-supplied real client address when available.
        meta = request.META
        client_ip = (meta["HTTP_X_REAL_IP"] if "HTTP_X_REAL_IP" in meta
                     else meta["REMOTE_ADDR"])

        started = getattr(request, 'start_time', time.time())
        elapsed_ms = int((time.time() - started) * 1000)
        path = request.get_full_path()

        # TODO: '`' split list, not dict
        # NOTE(review): ``request._body_`` (trailing underscore) looks like
        # a typo for Django's ``request._body`` — kept as-is to preserve
        # behavior; confirm with the author.
        record = [
            path,
            elapsed_ms,
            request.method,
            request.GET,
            request._body_,
            request.FILES.keys(),
            client_ip,
            meta.get('CONTENT_LENGTH'),
            meta.get('HTTP_USER_AGENT'),
            meta.get('HTTP_REFERER'),
            meta.get('CONTENT_TYPE'),
            response.status_code,
            len(response.content),
        ]
        log_access(record)

        if elapsed_ms > 1000:
            client.captureMessage("Slow request: %s" % path,
                                  request=request,
                                  extra={'proc_time': elapsed_ms})
    return response
def push_empty_groups_to_cis(sender, instance, **kwargs):
    """Notify CIS about the profile deletion.

    Remove all the access groups and tags from the profile.
    """
    from mozillians.users.tasks import send_userprofile_to_cis

    data = bundle_profile_data(instance.id, delete=True)
    for entry in data:
        # Record each per-user deletion transaction in Sentry before the
        # asynchronous CIS push is dispatched.
        sentry_client.captureMessage(
            'CIS group deletion - {}'.format(entry['user_id']),
            data={
                'level': logging.DEBUG,
                'logger': 'mozillians.cis_transaction'
            },
            stack=True,
            extra={
                'cis_transaction_data': json.dumps(entry)
            },
        )

    send_userprofile_to_cis.delay(profile_results=data)
def generate_thumbnail(channel):
    """Return a thumbnail encoding for ``channel``.

    Prefers the pre-encoded icon, then the cached thumbnail encoding, and
    finally generates a base64 data-URI (max 200px) from the thumbnail file
    on disk. Returns None when nothing is available or the image cannot be
    read.
    """
    THUMBNAIL_DIMENSION = 200
    if channel.icon_encoding:
        return channel.icon_encoding
    if channel.thumbnail_encoding:
        # thumbnail_encoding is a stringified dict; extract the base64 payload.
        return ast.literal_eval(channel.thumbnail_encoding).get('base64')
    if channel.thumbnail:
        try:
            checksum, ext = os.path.splitext(channel.thumbnail)
            filepath = generate_file_on_disk_name(checksum, channel.thumbnail)
            out = StringIO.StringIO()
            with Image.open(filepath) as image:
                width, height = image.size
                # Never upscale: cap at the smaller of 200px and the source size.
                side = min([THUMBNAIL_DIMENSION, width, height])
                image.thumbnail((side, side), Image.ANTIALIAS)
                image.save(out, image.format)
            return "data:image/{};base64,{}".format(ext[1:], base64.b64encode(out.getvalue()))
        except IOError:
            # Best-effort: report the failure and fall through to None.
            client.captureMessage("Failed to generate thumbnail for channel id={}, filepath={}".format(
                channel.id, filepath))
def toggle_data(self, user, source, public):
    """Toggle whether a user's data for ``source`` is publicly shared.

    :param user: user whose sharing status is toggled
    :param source: a known source label or a "direct-sharing-<id>" label
    :param public: string flag — only the exact string "True" enables sharing
    """
    # Reject unrecognized sources; report to Sentry except under tests.
    if source not in get_source_labels() and not source.startswith(
        "direct-sharing-"
    ):
        error_msg = (
            "Public sharing toggle attempted for "
            'unexpected source "{}"'.format(source)
        )
        django_messages.error(self.request, error_msg)
        if not settings.TESTING:
            raven_client.captureMessage(error_msg)
        return

    project = id_label_to_project(source)
    project_membership = DataRequestProjectMember.objects.get(
        member=user.member, project=project
    )
    participant = user.member.public_data_participant
    access, _ = PublicDataAccess.objects.get_or_create(
        participant=participant, project_membership=project_membership
    )
    # Default to private; only public == "True" on a project that permits
    # public data flips it on.
    access.is_public = False
    if public == "True":
        if not project.no_public_data:
            access.is_public = True
    access.save()

    # Record a one-time "publicly-shared" activity event for approved projects.
    # NOTE(review): this check is not gated on the new ``is_public`` value,
    # so a first toggle to private would also create the event — confirm
    # whether that is intended.
    if (
        project.approved
        and not ActivityFeed.objects.filter(
            member=user.member, project=project, action="publicly-shared"
        ).exists()
    ):
        event = ActivityFeed(
            member=user.member, project=project, action="publicly-shared"
        )
        event.save()
def get(self, request, zipped_filename, embedded_filepath):
    """
    Handles GET requests and serves a static file from within the zip file.

    :param request: incoming HTTP request
    :param zipped_filename: storage name of the zip file (MD5-based naming)
    :param embedded_filepath: path of the requested file inside the zip
    :return: a streaming/in-memory HTTP response, or a 404/304/500 response
    """
    # Reject names that don't match the expected MD5-based storage pattern.
    if not VALID_STORAGE_FILENAME.match(zipped_filename):
        return HttpResponseNotFound("'{}' is not a valid URL for this zip file".format(zipped_filename))

    storage = default_storage

    # calculate the local file path to the zip file
    filename, ext = os.path.splitext(zipped_filename)
    zipped_path = generate_object_storage_name(filename, zipped_filename)

    # file size
    file_size = 0

    # if the zipfile does not exist on disk, return a 404
    if not storage.exists(zipped_path):
        return HttpResponseNotFound('"%(filename)s" does not exist in storage' % {'filename': zipped_path})

    # if client has a cached version, use that (we can safely assume nothing has changed, due to MD5)
    if request.META.get('HTTP_IF_MODIFIED_SINCE'):
        return HttpResponseNotModified()

    zf_obj = storage.open(zipped_path)

    try:
        with zipfile.ZipFile(zf_obj) as zf:

            # if no path, or a directory, is being referenced, look for an index.html file
            if not embedded_filepath or embedded_filepath.endswith("/"):
                embedded_filepath += "index.html"

            # get the details about the embedded file, and ensure it exists
            try:
                info = zf.getinfo(embedded_filepath)
            except KeyError:
                return HttpResponseNotFound('"{}" does not exist inside "{}"'.format(embedded_filepath, zipped_filename))

            # try to guess the MIME type of the embedded file being referenced
            content_type = mimetypes.guess_type(embedded_filepath)[0] or 'application/octet-stream'

            if not os.path.splitext(embedded_filepath)[1] == '.json':
                # generate a streaming response object, pulling data from within the zip file
                response = FileResponse(zf.open(info), content_type=content_type)
                file_size = info.file_size
            else:
                # load the stream from json file into memory, replace the path_place_holder.
                content = zf.open(info).read()
                str_to_be_replaced = ('$' + exercises.IMG_PLACEHOLDER).encode()
                zipcontent = ('/' + request.resolver_match.url_name + "/" + zipped_filename).encode()
                content_with_path = content.replace(str_to_be_replaced, zipcontent)
                response = HttpResponse(content_with_path, content_type=content_type)
                file_size = len(content_with_path)
    except zipfile.BadZipfile:
        # Report the corrupt zip to Sentry with whatever file metadata the
        # storage object exposes, then fail with a 500.
        just_downloaded = getattr(zf_obj, 'just_downloaded', "Unknown (Most likely local file)")
        client.captureMessage("Unable to open zip file. File info: name={}, size={}, mode={}, just_downloaded={}".format(
            zf_obj.name, zf_obj.size, zf_obj.mode, just_downloaded))
        return HttpResponseServerError("Attempt to open zip file failed. Please try again, and if you continue to receive this message, please check that the zip file is valid.")

    # set the last-modified header to the date marked on the embedded file
    if info.date_time:
        response["Last-Modified"] = http_date(time.mktime(datetime.datetime(*info.date_time).timetuple()))

    # cache these resources forever; this is safe due to the MD5-naming used on content files
    response["Expires"] = "Sun, 17-Jan-2038 19:14:07 GMT"

    # set the content-length header to the size of the embedded file
    if file_size:
        response["Content-Length"] = file_size

    # ensure the browser knows not to try byte-range requests, as we don't support them here
    response["Accept-Ranges"] = "none"

    _add_access_control_headers(request, response)

    # restrict CSP to only allow resources to be loaded from the Studio host, to prevent info leakage
    # (e.g. via passing user info out as GET parameters to an attacker's server), or inadvertent data usage
    host = request.build_absolute_uri('/').strip("/")
    response["Content-Security-Policy"] = "default-src 'self' 'unsafe-inline' 'unsafe-eval' data: " + host

    return response