def handle_error(self, context):
    """Record a database error on the trace and close the in-flight span.

    Called by the DB wrapper when `execute` raised; `context.original_exception`
    carries the underlying driver error.
    """
    original = context.original_exception
    beeline.add_context_field(
        "db.error", beeline.internal.stringify_exception(original))
    # Close the query span we opened before executing, if one is still open.
    open_span = self.state.span
    if open_span:
        beeline.finish_span(open_span)
        self.state.span = None
def availability(zip_code):
    """Check vaccine availability at Rite Aid stores near *zip_code*.

    Fetches the store list for the zip, then queries each store's
    availability concurrently (one thread per store), and returns the
    collected per-store results as JSON.
    """
    beeline.add_context_field('zip code', zip_code)
    with beeline.tracer(name='load stores in zip code'):
        store_response = requests.get(
            'https://www.riteaid.com/services/ext/v2/stores/getStores',
            params={
                'address': zip_code,
                'attrFilter': 'PREF-112',  # NOTE(review): presumably the vaccine attribute filter — confirm
                'fetchMechanismVersion': '2',
                'radius': '50',
            },
        ).json()['Data']['stores']
    stores = []
    threads = []
    with beeline.tracer(name='get availability in stores'):
        for store_data in store_response:
            store_id = store_data['storeNumber']
            # traced_thread propagates the current trace into the worker;
            # each worker appends its result dict to the shared `stores` list.
            t = threading.Thread(
                target=beeline.traced_thread(get_store_data_thread),
                args=(store_id, store_data, stores))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
    beeline.add_context_field(
        'possible_availability',
        sum(1 for i in stores if i['possible_availability']))
    return jsonify(stores)
def _start_response(status, headers, *args):
    """WSGI start_response shim: record the status code on the trace.

    NOTE(review): `root_span`, `signals` and `start_response` are closure
    variables from the enclosing WSGI middleware — not visible here.
    """
    # WSGI status is a string like "200 OK"; the first token is the code.
    status_code = int(status[0:4])
    beeline.add_context_field("response.status_code", status_code)
    if not signals.signals_available:
        # Without framework signals there is no later hook, so the trace
        # must be finished here.
        beeline.finish_trace(root_span)
    return start_response(status, headers, *args)
def process_view(self, request, view_func, view_args, view_kwargs):
    """Django middleware hook: tag the trace with the resolved view's name."""
    if not beeline.get_beeline():
        return
    try:
        beeline.add_context_field("django.view_func", view_func.__name__)
    except AttributeError:
        # Some view callables (e.g. functools.partial) lack __name__; skip.
        pass
def _beeline_wrapper(event, context):
    """AWS Lambda wrapper: trace one invocation of `handler` and flush.

    NOTE: `handler` and `COLD_START` come from the enclosing decorator scope.
    """
    import traceback  # stdlib; local import keeps this edit self-contained

    global COLD_START

    # don't blow up the world if the beeline has not been initialized
    if not beeline.get_beeline():
        return handler(event, context)

    root_span = None
    try:
        # Create request context
        request_context = {
            "app.function_name": getattr(context, 'function_name', ""),
            "app.function_version": getattr(context, 'function_version', ""),
            "app.request_id": getattr(context, 'aws_request_id', ""),
            "app.event": event,
            "meta.cold_start": COLD_START,
            "name": handler.__name__
        }
        lr = LambdaRequest(event)
        root_span = beeline.propagate_and_start_trace(request_context, lr)

        # Actually run the handler
        resp = handler(event, context)

        if resp is not None:
            beeline.add_context_field('app.response', resp)
        return resp
    except Exception as e:
        # Record the failure on the trace before re-raising, matching the
        # exception capture done by the sibling wrapper in this module.
        beeline.add_context({
            "app.exception_type": str(type(e)),
            "app.exception_string": beeline.internal.stringify_exception(e),
            "app.exception_stacktrace": traceback.format_exc(),
        })
        raise
    finally:
        # This remains false for the lifetime of the module
        COLD_START = False
        beeline.finish_trace(root_span)
        # we have to flush events before the lambda returns
        beeline.get_beeline().client.flush()
def event_post():
    """Ingest a Treasure-Data-style event batch and insert into BigQuery.

    Returns a JSON map of table name -> per-row success markers.
    Raises RuntimeError if BigQuery reports insert errors.
    """
    print(request.headers)
    if getattr(request, 'content_encoding', None) == 'deflate':
        rdata = zlib.decompress(request.data)
    else:
        rdata = request.data
    req = json.loads(rdata)
    extra = {}
    if 'X-Td-Write-Key' in request.headers:
        # Header is a comma-separated list of key=value pairs. Split on the
        # first '=' only so values that themselves contain '=' (e.g. base64
        # padding) don't blow up the dict construction.
        extra = dict(
            param.split('=', 1)
            for param in request.headers['X-Td-Write-Key'].split(','))
    # The only table we support right now is pebble.phone_events.
    resp = {}
    if 'pebble.phone_events' in req:
        toinsert = [
            submit_event(ev, extra) for ev in req['pebble.phone_events']
        ]
        errors = bq.insert_rows_json(config['BIGQUERY_TABLE'], toinsert)
        if errors != []:
            raise RuntimeError(errors)  # Sigh.
        resp['pebble.phone_events'] = [{"success": "true"} for _ in toinsert]
        beeline.add_context_field('treasure.events.count',
                                  len(resp['pebble.phone_events']))
    return jsonify(resp)
def cohort(user):
    """Evaluate the generators named in ?select= for this user; 400 on unknown."""
    if 'uid' in user:
        beeline.add_context_field('user', user['uid'])
    requested = request.args['select'].split(',')
    result = {}
    for name in requested:
        if name not in generators:
            abort(400)
        result[name] = generators[name]()
    return jsonify(result)
def get_access_token():
    """Return the caller's access token from ?access_token= or a Bearer header.

    Aborts with 401 when neither source provides one.
    """
    token = request.args.get('access_token')
    if not token:
        header = request.headers.get('Authorization')
        if header:
            parts = header.split(' ')
            if len(parts) == 2 and parts[0] == 'Bearer':
                token = parts[1]
    if not token:
        abort(401)
    # Record only the last 8 characters so the full credential never lands
    # in telemetry.
    beeline.add_context_field('access_token', token[-8:])
    return token
def create_http_event(self, request):
    """Django middleware body: wrap one request/response cycle in a trace."""
    # Code to be executed for each request before
    # the view (and later middleware) are called.
    trace_id, parent_id, context = _get_trace_context(request)
    trace_name = "django_http_%s" % request.method.lower()
    trace = beeline.start_trace(context={
        "name": trace_name,
        "type": "http_server",
        "request.host": request.get_host(),
        "request.method": request.method,
        "request.path": request.path,
        "request.remote_addr": request.META.get('REMOTE_ADDR'),
        "request.content_length": request.META.get('CONTENT_LENGTH', 0),
        "request.user_agent": request.META.get('HTTP_USER_AGENT'),
        "request.scheme": request.scheme,
        "request.secure": request.is_secure(),
        "request.query": request.GET.dict(),
        # NOTE(review): HttpRequest.is_ajax() is deprecated/removed in
        # Django 3.1+/4.0 — confirm the Django version this targets.
        "request.xhr": request.is_ajax(),
        "request.post": request.POST.dict()
    }, trace_id=trace_id, parent_span_id=parent_id)

    # Propagate any custom trace fields that arrived with the inbound request.
    if isinstance(context, dict):
        for k, v in context.items():
            beeline.add_trace_field(k, v)

    response = self.get_response(request)

    # Code to be executed for each request/response after
    # the view is called.
    beeline.add_context_field("response.status_code", response.status_code)
    beeline.finish_trace(trace)
    return response
def _beeline_wrapper(event, context): global COLD_START # don't blow up the world if the beeline has not been initialized if not beeline.get_beeline(): return handler(event, context) try: # assume we're going to get bad values sometimes in our headers trace_id, parent_id, trace_context = None, None, None try: trace_id, parent_id, trace_context = _get_trace_data(event) except Exception as e: beeline.internal.log( 'error attempting to extract trace context: %s', beeline.internal.stringify_exception(e)) pass with beeline.tracer(name=handler.__name__, trace_id=trace_id, parent_id=parent_id): beeline.add_context({ "app.function_name": getattr(context, 'function_name', ""), "app.function_version": getattr(context, 'function_version', ""), "app.request_id": getattr(context, 'aws_request_id', ""), "app.event": event, "meta.cold_start": COLD_START, }) # if there is custom context attached from upstream, add that now if isinstance(trace_context, dict): for k, v in trace_context.items(): beeline.add_trace_field(k, v) resp = handler(event, context) if resp is not None: beeline.add_context_field('app.response', resp) return resp finally: # This remains false for the lifetime of the module COLD_START = False # we have to flush events before the lambda returns beeline.get_beeline().client.flush()
def _beeline_wrapper(event, context): global COLD_START # don't blow up the world if the beeline has not been initialized if not beeline.get_beeline(): return handler(event, context) root_span = None try: # Create request context request_context = { "app.function_name": getattr(context, 'function_name', ""), "app.function_version": getattr(context, 'function_version', ""), "app.request_id": getattr(context, 'aws_request_id', ""), "meta.cold_start": COLD_START, "name": handler.__name__ } if record_input: request_context["app.event"] = event lr = LambdaRequest(event) root_span = beeline.propagate_and_start_trace(request_context, lr) # Actually run the handler resp = handler(event, context) if resp is not None and record_output: beeline.add_context_field('app.response', resp) return resp except Exception as e: beeline.add_context({ "app.exception_type": str(type(e)), "app.exception_string": beeline.internal.stringify_exception(e), "app.exception_stacktrace": traceback.format_exc(), }) raise e finally: # This remains false for the lifetime of the module COLD_START = False beeline.finish_trace(root_span) # we have to flush events before the lambda returns beeline.get_beeline().client.flush()
def __call__(self, execute, sql, params, many, context):
    """Django database instrumentation wrapper: trace one query execution.

    Wraps Django's `execute_wrapper` callable protocol: times the query,
    records errors, and attaches cursor stats after execution.
    """
    vendor = context['connection'].vendor
    trace_name = "django_%s_query" % vendor
    with beeline.tracer(trace_name):
        beeline.add_context({
            "type": "db",
            "db.query": sql,
            "db.query_args": params,
        })
        try:
            db_call_start = datetime.datetime.now()
            result = execute(sql, params, many, context)
            db_call_diff = datetime.datetime.now() - db_call_start
            # Duration in milliseconds.
            beeline.add_context_field("db.duration",
                                      db_call_diff.total_seconds() * 1000)
        except Exception as e:
            beeline.add_context_field("db.error", str(type(e)))
            beeline.add_context_field(
                "db.error_detail", beeline.internal.stringify_exception(e))
            raise
        else:
            return result
        finally:
            if vendor in ('postgresql', 'mysql'):
                # NOTE(review): `lastrowid` is well-defined for MySQL but
                # not generally meaningful on psycopg2 — confirm intent.
                beeline.add_context({
                    "db.last_insert_id": context['cursor'].cursor.lastrowid,
                    "db.rows_affected": context['cursor'].cursor.rowcount,
                })
def user_pin(pin_id):
    """Create, update, or delete a timeline pin for the authenticated user.

    PUT validates and upserts the pin (plus a UserTimeline event); DELETE
    removes the pin's timeline events and records a delete event. Returns
    'OK' on success or an api_error response on failure.
    """
    try:
        user_token = request.headers.get('X-User-Token')
        user_id, app_uuid, data_source = get_locker_info(user_token)
    except ValueError:
        # Token missing or not resolvable to a locker entry.
        return api_error(410)
    if request.method == 'PUT':
        pin_json = request.json
        if not pin_valid(pin_id, pin_json):
            beeline.add_context_field('timeline.failure.cause', 'pin_valid')
            return api_error(400)
        pin = TimelinePin.query.filter_by(app_uuid=app_uuid, user_id=user_id,
                                          id=pin_id).one_or_none()
        if pin is None:
            # create pin
            pin = TimelinePin.from_json(pin_json, app_uuid, user_id,
                                        data_source, 'web')
            if pin is None:
                beeline.add_context_field('timeline.failure.cause',
                                          'from_json')
                return api_error(400)
            user_timeline = UserTimeline(user_id=user_id,
                                         type='timeline.pin.create', pin=pin)
            db.session.add(pin)
            db.session.add(user_timeline)
            db.session.commit()
        else:
            # update pin
            try:
                pin.update_from_json(pin_json)
                # Clean up old UserTimeline events first. Note that this
                # has to be transactional with creating the new one --
                # which, luckily, it is!
                UserTimeline.query.filter(UserTimeline.pin == pin).delete()
                user_timeline = UserTimeline(user_id=user_id,
                                             type='timeline.pin.create',
                                             pin=pin)
                db.session.add(pin)
                db.session.add(user_timeline)
                db.session.commit()
            except (KeyError, ValueError):
                beeline.add_context_field('timeline.failure.cause',
                                          'update_pin')
                return api_error(400)
    elif request.method == 'DELETE':
        pin = TimelinePin.query.filter_by(app_uuid=app_uuid, user_id=user_id,
                                          id=pin_id).first_or_404()
        # No need to post even old create events, since nobody will render
        # them, after all.
        UserTimeline.query.filter(UserTimeline.pin == pin).delete()
        user_timeline = UserTimeline(user_id=user_id,
                                     type='timeline.pin.delete', pin=pin)
        db.session.add(user_timeline)
        db.session.commit()
    return 'OK'
def get_locker_info(user_token):
    """Resolve *user_token* to (user_id, app_uuid, data_source).

    Sandbox tokens are resolved locally; anything else is looked up via the
    appstore API. Raises ValueError when the token is missing or unknown.
    """
    if user_token is None:
        raise ValueError

    sandbox = SandboxToken.query.filter_by(token=user_token).one_or_none()
    if sandbox is not None:
        beeline.add_context_field('user', sandbox.user_id)
        beeline.add_context_field('app_uuid', sandbox.app_uuid)
        return (sandbox.user_id, sandbox.app_uuid,
                f"sandbox-uuid:{sandbox.app_uuid}")

    # Not a sandbox token: ask the appstore to resolve it.
    result = requests.get(
        f"{config['APPSTORE_API_URL']}/api/v1/locker/by_token/{user_token}",
        headers={"Authorization": f"Bearer {config['SECRET_KEY']}"})
    if result.status_code != 200:
        raise ValueError
    info = result.json()
    beeline.add_context_field('user', info['user_id'])
    beeline.add_context_field('app_uuid', info['app_uuid'])
    return info['user_id'], info['app_uuid'], f"uuid:{info['app_uuid']}"
def verify_timestamp(timestamp, time_limit=timedelta(minutes=5)):
    """Reject *timestamp* unless it is within *time_limit* of now (UTC).

    Raises BadTimestampError for unparseable or out-of-range timestamps.
    """
    beeline.add_context_field('signature.time_limit_seconds',
                              time_limit.total_seconds())
    try:
        parsed = datetime.fromtimestamp(float(timestamp), tz=timezone.utc)
        beeline.add_context_field('signature.timestamp', parsed.isoformat())
        current = datetime.now(timezone.utc)
        beeline.add_context_field('signature.now', current.isoformat())
        drift = current - parsed
        beeline.add_context_field('signature.time_drift_seconds',
                                  drift.total_seconds())
        # abs() so a message timestamped in the future is rejected too.
        if abs(drift) > time_limit:
            raise BadTimestampError("Timestamp out of range")
    except (ValueError, OverflowError) as exc:
        raise BadTimestampError("Invalid timestamp") from exc
def request(_request, instance, args, kwargs):
    """Patched requests.Session.request: propagate trace context, record call.

    `_request` is the original unbound method; `instance` is the Session.
    """
    span = beeline.start_span(context={"meta.type": "http_client"})
    b = beeline.get_beeline()
    if b and b.http_trace_propagation_hook is not None:
        new_headers = beeline.http_trace_propagation_hook()
        if new_headers:
            b.log(
                "requests lib - adding trace context to outbound request: %s",
                new_headers)
            instance.headers.update(new_headers)
        else:
            b.log("requests lib - no trace context found")
    try:
        resp = None
        # Required as Python treats the `or` keyword differently in string
        # interpolation vs. when assigning a variable.
        method = kwargs.get('method') or args[0]
        beeline.add_context({
            "name": "requests_%s" % method,
            "request.method": method,
            "request.url": kwargs.get('url') or args[1],
        })
        resp = _request(*args, **kwargs)
        return resp
    except Exception as e:
        beeline.add_context({
            "request.error_type": str(type(e)),
            "request.error": beeline.internal.stringify_exception(e),
        })
        raise
    finally:
        # `is not None` matters: requests.Response is falsy for 4xx/5xx.
        if resp is not None:
            content_type = resp.headers.get('content-type')
            if content_type:
                beeline.add_context_field("response.content_type",
                                          content_type)
            content_length = resp.headers.get('content-length')
            if content_length:
                beeline.add_context_field("response.content_length",
                                          content_length)
            if hasattr(resp, 'status_code'):
                beeline.add_context_field("response.status_code",
                                          resp.status_code)
        beeline.finish_span(span)
def request(_request, instance, args, kwargs):
    """Patched requests.Session.request: add X-Honeycomb-Trace, record call.

    `_request` is the original unbound method; `instance` is the Session.
    """
    span = beeline.start_span(context={"meta.type": "http_client"})
    b = beeline.get_beeline()
    if b:
        context = b.tracer_impl.marshal_trace_context()
        if context:
            b.log(
                "requests lib - adding trace context to outbound request: %s",
                context)
            instance.headers['X-Honeycomb-Trace'] = context
        else:
            b.log("requests lib - no trace context found")
    try:
        resp = None
        # Bind the method first: `%` binds tighter than `or`, so the old
        # `"requests_%s" % kwargs.get('method') or args[0]` produced the
        # literal "requests_None" (truthy) and never fell back to args[0].
        method = kwargs.get('method') or args[0]
        beeline.add_context({
            "name": "requests_%s" % method,
            "request.method": method,
            "request.url": kwargs.get('url') or args[1],
        })
        resp = _request(*args, **kwargs)
        return resp
    except Exception as e:
        beeline.add_context({
            "request.error_type": str(type(e)),
            "request.error": beeline.internal.stringify_exception(e),
        })
        raise
    finally:
        # Compare against None explicitly: requests.Response.__bool__ is
        # False for 4xx/5xx, so `if resp:` silently dropped response fields
        # for error responses.
        if resp is not None:
            content_type = resp.headers.get('content-type')
            if content_type:
                beeline.add_context_field("response.content_type",
                                          content_type)
            content_length = resp.headers.get('content-length')
            if content_length:
                beeline.add_context_field("response.content_length",
                                          content_length)
            if hasattr(resp, 'status_code'):
                beeline.add_context_field("response.status_code",
                                          resp.status_code)
        beeline.finish_span(span)
def _urllibopen(_urlopen, instance, args, kwargs):
    """Patched urllib.request.urlopen: propagate trace context, record call."""
    # urlopen accepts either a string URL or a Request object as its first arg
    # It's easier to process the info contained in the request and modify it
    # by converting the URL string into a Request
    if type(args[0]) != urllib.request.Request:
        # args is a tuple, so rebuild it rather than assigning in place.
        args = (urllib.request.Request(args[0]), ) + tuple(args[1:])

    span = beeline.start_span(context={"meta.type": "http_client"})
    b = beeline.get_beeline()
    if b and b.http_trace_propagation_hook is not None:
        new_headers = beeline.http_trace_propagation_hook()
        if new_headers:
            # Merge the new headers into the existing headers for the outbound request
            b.log("urllib lib - adding trace context to outbound request: %s",
                  new_headers)
            args[0].headers.update(new_headers)
    try:
        resp = None
        beeline.add_context({
            "name": "urllib_%s" % args[0].get_method(),
            "request.method": args[0].get_method(),
            "request.uri": args[0].full_url
        })
        resp = _urlopen(*args, **kwargs)
        return resp
    except Exception as e:
        beeline.add_context({
            "request.error_type": str(type(e)),
            "request.error": beeline.internal.stringify_exception(e),
        })
        raise
    finally:
        if resp:
            beeline.add_context_field("response.status_code", resp.status)
            content_type = resp.getheader('content-type')
            if content_type:
                beeline.add_context_field("response.content_type",
                                          content_type)
            content_length = resp.getheader('content-length')
            if content_length:
                beeline.add_context_field("response.content_length",
                                          content_length)
        beeline.finish_span(span)
def _urllibopen(_urlopen, instance, args, kwargs):
    """Patched urllib.request.urlopen: add X-Honeycomb-Trace, record call."""
    # urlopen accepts a URL string or a Request; normalize to a Request so we
    # can attach headers. `args` is a tuple, so it must be rebuilt — the old
    # `args[0] = ...` raised TypeError (tuples don't support item assignment)
    # whenever urlopen was called with a plain string URL.
    if type(args[0]) != urllib.request.Request:
        args = (urllib.request.Request(args[0]), ) + tuple(args[1:])

    span = beeline.start_span(context={"meta.type": "http_client"})
    b = beeline.get_beeline()
    if b:
        context = b.tracer_impl.marshal_trace_context()
        if context:
            b.log("urllib lib - adding trace context to outbound request: %s",
                  context)
            args[0].headers['X-Honeycomb-Trace'] = context
        else:
            b.log("urllib lib - no trace context found")
    try:
        resp = None
        beeline.add_context({
            "name": "urllib_%s" % args[0].get_method(),
            "request.method": args[0].get_method(),
            "request.uri": args[0].full_url
        })
        resp = _urlopen(*args, **kwargs)
        return resp
    except Exception as e:
        beeline.add_context({
            "request.error_type": str(type(e)),
            "request.error": beeline.internal.stringify_exception(e),
        })
        raise
    finally:
        if resp:
            beeline.add_context_field("response.status_code", resp.status)
            content_type = resp.getheader('content-type')
            if content_type:
                beeline.add_context_field("response.content_type",
                                          content_type)
            content_length = resp.getheader('content-length')
            if content_length:
                beeline.add_context_field("response.content_length",
                                          content_length)
        beeline.finish_span(span)
def generate_fw():
    """Return the firmware-update blocks available for the caller's hardware."""
    # pull these all out for reference even though we don't use them all right now.
    hardware = request.args['hardware']
    mobile_platform = request.args['mobilePlatform']
    mobile_version = request.args['mobileVersion']
    mobile_hardware = request.args['mobileHardware']
    pebble_app_version = request.args['pebbleAppVersion']

    for field, value in (('user.hardware', hardware),
                         ('user.mobile_platform', mobile_platform),
                         ('user.pebble_app_version', pebble_app_version)):
        beeline.add_context_field(field, value)

    if hardware not in fw_config['hardware']:
        abort(400)
    fw = fw_config['hardware'][hardware]

    # Only include the firmware kinds that are configured for this hardware.
    response = {}
    for kind in ('normal', 'recovery'):
        if kind in fw:
            response[kind] = build_fw_block(hardware, kind)
    return response
def geocode(latitude, longitude):
    """Proxy IBM/TWC weather for a coordinate in the legacy response shape.

    Requires a valid subscriber access token; returns a JSON document with
    7-day forecast, current conditions mapped to the old field names, and
    response metadata.
    """
    if not request.args.get('access_token'):
        abort(401)
    user_req = requests.get(
        f"{auth_internal}/api/v1/me",
        headers={'Authorization': f"Bearer {request.args['access_token']}"})
    if user_req.status_code == 401:
        abort(401)
    user_req.raise_for_status()
    if not user_req.json()['is_subscribed']:
        raise HTTPPaymentRequired()
    beeline.add_context_field("user", user_req.json()['uid'])
    units = request.args.get('units', 'h')
    language = request.args.get('language', 'en-US')
    beeline.add_context_field("weather.language", language)
    beeline.add_context_field("weather.units", units)
    forecast_req = requests.get(
        f"{ibm_root}/geocode/{latitude}/{longitude}/forecast/daily/7day.json?language={language}&units={units}")
    forecast_req.raise_for_status()
    forecast = forecast_req.json()
    current_req = requests.get(
        f"{ibm_root}/geocode/{latitude}/{longitude}/observations.json?language={language}&units={units}")
    current_req.raise_for_status()
    current = current_req.json()
    observation = current['observation']
    # Map the modern observation payload back onto the legacy field names
    # old clients expect. Commented-out keys are fields we can't produce.
    old_style_conditions = {
        'metadata': current['metadata'],
        'observation': {
            'class': observation['class'],
            'expire_time_gmt': observation['expire_time_gmt'],
            'obs_time': observation['valid_time_gmt'],
            # 'obs_time_local': we don't know.
            'wdir': observation['wdir'],
            'icon_code': observation['wx_icon'],
            'icon_extd': observation['icon_extd'],
            # sunrise: we don't know these, but we could yank them out of the forecast for today.
            # sunset
            'day_ind': observation['day_ind'],
            'uv_index': observation['uv_index'],
            # uv_warning: I don't even know what this is. Apparently numeric.
            # wxman: ???
            'obs_qualifier_code': observation['qualifier'],
            'ptend_code': observation['pressure_tend'],
            'dow': datetime.datetime.utcfromtimestamp(
                observation['valid_time_gmt']).strftime('%A'),
            # sometimes this is "CALM", don't know if that's okay
            'wdir_cardinal': observation['wdir_cardinal'],
            'uv_desc': observation['uv_desc'],
            # I'm just guessing at how the three phrases map.
            'phrase_12char': observation['blunt_phrase'] or observation['wx_phrase'],
            'phrase_22char': observation['terse_phrase'] or observation['wx_phrase'],
            'phrase_32char': observation['wx_phrase'],
            'ptend_desc': observation['pressure_desc'],
            # sky_cover: we don't seem to get a description of this?
            'clds': observation['clds'],
            'obs_qualifier_severity': observation['qualifier_svrty'],
            # vocal_key: we don't get one of these
            # The units-specific block is keyed by the legacy units name.
            {'e': 'imperial', 'm': 'metric', 'h': 'uk_hybrid'}[units]: {
                'wspd': observation['wspd'],
                'gust': observation['gust'],
                'vis': observation['vis'],
                # mslp: don't know what this is but it doesn't map to anything
                'altimeter': observation['pressure'],
                'temp': observation['temp'],
                'dewpt': observation['dewPt'],
                'rh': observation['rh'],
                'wc': observation['wc'],
                'hi': observation['heat_index'],
                'feels_like': observation['feels_like'],
                # temp_change_24hour, temp_max_24hour, temp_min_24hour, pchange: don't get any of these
                # {snow,precip}_{{1,6,24}hour,mtd,season,{2,3,7}day}: don't get these either
                # ceiling, obs_qualifier_{100,50,32}char: or these.
                # these are all now in their own request that you can pay extra to retrieve.
            },
        }
    }
    return jsonify(
        fcstdaily7={
            'errors': False,
            'data': forecast,
        },
        conditions={
            'errors': False,
            'data': old_style_conditions,
        },
        metadata={
            'version': 2,
            'transaction_id': str(int(time.time())),
        },
    )
def before_request():
    """Tag every request with its route and, when logged in, the user id."""
    beeline.add_context_field("route", request.endpoint)
    user = current_user
    if user.is_authenticated:
        beeline.add_context_field("user", user.id)
def before_request():
    """Tag the request with its route and a short token suffix, if present."""
    beeline.add_context_field("route", request.endpoint)
    token = session.get('access_token')
    if token:
        # Last six characters only — enough to correlate, not to leak.
        beeline.add_context_field("access_token", token[-6:])
def get_uid():
    """Return the authenticated user's uid from the auth service."""
    result = demand_authed_request(
        'GET', f"{config['REBBLE_AUTH_URL']}/api/v1/me?flag_authed=true")
    # Decode the body once — the old code called result.json() twice,
    # re-parsing the response for no benefit.
    uid = result.json()['uid']
    beeline.add_context_field('user', uid)
    return uid
def api_error(code):
    """Build a JSON error response for *code* and tag the trace with it."""
    payload = ERROR_CODES[code]
    beeline.add_context_field('timeline.failure', payload['errorCode'])
    response = jsonify(payload)
    response.status_code = code
    return response
def _start_response(status, headers, *args):
    """WSGI start_response shim: record the status code and finish the trace.

    NOTE(review): `trace` and `start_response` are closure variables from
    the enclosing WSGI middleware — not visible here.
    """
    # WSGI status is a string like "200 OK". Record the numeric code (as the
    # sibling middleware does) instead of the raw string, so the
    # "response.status_code" field has a consistent integer type.
    beeline.add_context_field("response.status_code", int(status[0:4]))
    beeline.finish_trace(trace)
    return start_response(status, headers, *args)
def pin_valid(pin_id, pin_json):
    """Validate a timeline pin payload against *pin_id*.

    Returns True when the payload is acceptable; on any failure records a
    'timeline.failure.details' field naming the reason and returns False.
    """
    try:
        if pin_json is None or pin_json.get('id') != pin_id:
            beeline.add_context_field('timeline.failure.details',
                                      'parse_failure_or_id_mismatch')
            return False
        if not time_valid(parse_time(pin_json['time'])):
            beeline.add_context_field('timeline.failure.details',
                                      'invalid_time')
            return False
        if 'createNotification' in pin_json and 'time' in pin_json[
                'createNotification']:
            beeline.add_context_field('timeline.failure.details',
                                      'invalid_time_attribute')
            return False
        # The createNotification type does not require a time attribute.
        if 'updateNotification' in pin_json and not time_valid(
                parse_time(pin_json['updateNotification']['time'])):
            beeline.add_context_field('timeline.failure.details',
                                      'invalid_time_for_update')
            return False
        if 'reminders' in pin_json:
            if len(pin_json['reminders']) > 3:
                beeline.add_context_field('timeline.failure.details',
                                          'too_many_reminders')
                return False  # Max 3 reminders
            for reminder in pin_json['reminders']:
                if not time_valid(parse_time(reminder['time'])):
                    beeline.add_context_field('timeline.failure.details',
                                              'invalid_reminder_time')
                    return False
    except (KeyError, ValueError, TypeError):
        # Missing keys / malformed values anywhere above mean an invalid pin.
        beeline.add_context_field('timeline.failure.details',
                                  'miscellaneous_failure')
        return False
    return True
def googlecalendarwebhook():
    """Handle a Google Calendar push notification for a watched channel.

    Looks up the channel's owner, fetches upcoming events, and creates or
    updates an EasyCron job that fires at the next event's start time. A
    second (best-effort, error-suppressed) phase mirrors the next events'
    summaries into the event_details table.
    """
    external_id = request.headers["x-goog-channel-id"]
    add_context_field("goog_channel_id", external_id)
    add_context_field("goog_resource_id",
                      request.headers["x-goog-resource-id"])
    expiry = datetime.datetime.strptime(
        request.headers["x-goog-channel-expiration"],
        "%a, %d %b %Y %H:%M:%S %Z")
    add_context_field("goog_channel_expiration",
                      f"{expiry:%Y-%m-%dT%H:%M:%SZ}")
    now = datetime.datetime.utcnow()
    with tracer("db connection"):
        try:
            # NOTE(review): if connect() raises, `connection` is unbound and
            # the finally's connection.close() will NameError — confirm.
            with tracer("open db connection"):
                connection = psycopg2.connect(os.environ["POSTGRES_DSN"])
            with tracer("find google issuer_sub transaction"), connection:
                with tracer("cursor"), connection.cursor() as cursor:
                    with tracer("find google issuer_sub query"):
                        cursor.execute(
                            """
                            SELECT sub, calendar_id FROM calendarchatlink
                            WHERE external_id = %s
                            AND calendar_type = 'google'
                            """,
                            (external_id, ),
                        )
                    if not cursor.rowcount:
                        # Google retries 500 errors, even for deleted channels. Return a
                        # 404 and it won't try again.
                        return ("", 404)
                    assert cursor.rowcount == 1
                    g.sub, google_calendar_id = next(cursor)
            events_response = oauth.google.get(
                ("https://www.googleapis.com/calendar/v3/calendars"
                 f"/{google_calendar_id}/events"),
                params={
                    "maxResults": "50",
                    "orderBy": "startTime",
                    "singleEvents": "true",
                    "timeMin": f"{now:%Y-%m-%dT%H:%M:%SZ}",
                    "timeZone": "Etc/UTC",
                },
            )
            events_obj = events_response.json()
            events = events_obj["items"]
            calendar_tz = events_obj["timeZone"]
            # Find the first event that starts in the future.
            for event in events:
                start = parse_event_time(event["start"], calendar_tz)
                if start > now:
                    break
            else:  # no break
                return ("", 204)
            with tracer("calendarcron table exists transaction"), connection:
                with tracer("cursor"), connection.cursor() as cursor:
                    with tracer("calendarcron table exists query"):
                        cursor.execute("""
                            SELECT table_name FROM information_schema.tables
                            WHERE table_name = 'calendarcron'
                        """)
                    if not cursor.rowcount:
                        with tracer("create calendarcron table query"):
                            cursor.execute("""
                                CREATE TABLE calendarcron (
                                    sub text,
                                    calendar_id text,
                                    calendar_type text,
                                    cron_id text,
                                    next_event_start_time timestamp
                                )
                            """)
            with tracer("check next cron transaction"), connection:
                with tracer("cursor"), connection.cursor() as cursor:
                    with tracer("check next cron query"):
                        cursor.execute(
                            """
                            SELECT cron_id, next_event_start_time
                            FROM calendarcron
                            WHERE sub = %s AND calendar_id = %s
                            AND calendar_type = 'google'
                            """,
                            (g.sub, google_calendar_id),
                        )
                    if cursor.rowcount:
                        cron_id, existing_next_start_time = next(cursor)
                        new_is_earlier = start < existing_next_start_time
                        old_has_passed = existing_next_start_time < now
                        if new_is_earlier or old_has_passed:
                            # Retarget the existing EasyCron job at the new
                            # next-event time.
                            with tracer("update cron time query"):
                                cursor.execute(
                                    """
                                    UPDATE calendarcron
                                    SET next_event_start_time = %s
                                    WHERE sub = %s AND calendar_id = %s
                                    AND calendar_type = 'google'
                                    """,
                                    (start, g.sub, google_calendar_id),
                                )
                            cron_response = requests.get(
                                "https://www.easycron.com/rest/edit",
                                params={
                                    "token": os.environ["EASYCRON_KEY"],
                                    "id": cron_id,
                                    "cron_expression":
                                        f"{start:%M %H %d %m * %Y}",
                                    "url": url_for(
                                        "calendarcron.calendarcron",
                                        _external=True,
                                        next_event_start_time=(
                                            f"{start:%Y-%m-%dT%H:%M:%S}"),
                                        calendar_id=google_calendar_id,
                                        calendar_type="google",
                                    ),
                                },
                            )
                            cron_response.raise_for_status()
                            error = cron_response.json().get(
                                "error", {}).get("message")
                            if error:
                                raise Exception(error)
                    else:
                        # No cron row yet: register a new EasyCron job, then
                        # record it.
                        cron_response = requests.get(
                            "https://www.easycron.com/rest/add",
                            params={
                                "token": os.environ["EASYCRON_KEY"],
                                "url": url_for(
                                    "calendarcron.calendarcron",
                                    _external=True,
                                    next_event_start_time=(
                                        f"{start:%Y-%m-%dT%H:%M:%S}"),
                                    calendar_id=google_calendar_id,
                                    calendar_type="google",
                                ),
                                "cron_expression": f"{start:%M %H %d %m * %Y}",
                                "timezone_from": "2",
                                "timezone": "UTC",
                            },
                        )
                        cron_response.raise_for_status()
                        error = cron_response.json().get(
                            "error", {}).get("message")
                        if error:
                            raise Exception(error)
                        with tracer("insert cron query"):
                            cursor.execute(
                                """
                                INSERT INTO calendarcron (
                                    sub, calendar_id, calendar_type,
                                    cron_id, next_event_start_time
                                ) VALUES (%s, %s, 'google', %s, %s)
                                """,
                                (
                                    g.sub,
                                    google_calendar_id,
                                    cron_response.json()["cron_job_id"],
                                    start,
                                ),
                            )
            # New code testing in prod so suppress any errors
            with suppress(Exception):
                try:
                    events_start = find_next_event_start(events_obj, now)
                except LookupError:
                    return ("", 204)
                summaries = find_event_summaries_starting(
                    events_obj, events_start)
                with tracer(
                        "event_details table exists transaction"), connection:
                    with tracer("cursor"), connection.cursor() as cursor:
                        with tracer("event_details table exists query"):
                            cursor.execute("""
                                SELECT table_name
                                FROM information_schema.tables
                                WHERE table_name = 'event_details'
                            """)
                        if not cursor.rowcount:
                            with tracer("create event_details table query"):
                                cursor.execute("""
                                    CREATE TABLE event_details (
                                        calendar_type text,
                                        calendar_id text,
                                        summary text
                                    )
                                """)
                with tracer("update event_details transaction"), connection:
                    with tracer("cursor"), connection.cursor() as cursor:
                        with tracer("clear event_details query"):
                            cursor.execute(
                                """
                                DELETE FROM event_details
                                WHERE calendar_type = 'google'
                                AND calendar_id = %s
                                """,
                                (google_calendar_id, ),
                            )
                        with tracer("insert event_details query"):
                            execute_values(
                                cursor,
                                """
                                INSERT INTO event_details (
                                    calendar_type, calendar_id, summary
                                ) VALUES %s
                                """,
                                [("google", google_calendar_id, summary)
                                 for summary in summaries],
                            )
            return ("", 204)
        finally:
            connection.close()
def process_exception(self, request, exception):
    """Django middleware hook: record the exception detail on the trace."""
    # Use beeline's stringify helper — matching the sibling middleware in
    # this file — instead of bare str(), so exception messages are
    # stringified the same way everywhere.
    beeline.add_context_field(
        "request.error_detail",
        beeline.internal.stringify_exception(exception))
def process_exception(self, request, exception):
    """Django middleware hook: attach the stringified exception to the trace."""
    detail = beeline.internal.stringify_exception(exception)
    beeline.add_context_field("request.error_detail", detail)