import logging
import logging.handlers

from common_util import config_util

logger = logging.getLogger()

DEBUG_LOGGING = config_util.get('DEBUG_LOGGING')


def setup_logging():
    """Configure the root logger with a single stderr handler.

    Clears any previously-installed handlers so repeated calls do not
    duplicate output. Honors the DEBUG_LOGGING config flag: when truthy,
    both the handler and the logger are set to DEBUG instead of INFO.
    (Fix: previously the flag was read and logged but never applied.)
    """
    # clear any existing handlers
    if logger.hasHandlers():
        logger.handlers.clear()

    formatter = logging.Formatter(
        (
            "[%(asctime)s] | %(levelname)s | pid%(process)d | "
            "%(pathname)s.%(funcName)s:%(lineno)d | %(message)s"
        )
    )

    # DEBUG_LOGGING controls verbosity; default remains INFO.
    level = logging.DEBUG if DEBUG_LOGGING else logging.INFO

    # add log handler to sys.stderr (StreamHandler's default stream)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    handler.setLevel(level)
    logger.addHandler(handler)

    logger.setLevel(level)
    logger.info('Setting up logging. Debug? %s', bool(DEBUG_LOGGING))
from common_util import (
    config_util,
    redis_util,
)
from lib.traceback.traceback import Traceback

# Cache-key namespace for this module's dogpile region.
REGION_PREFIX = 'dogpile:fullstory'
DOGPILE_REGION = redis_util.make_dogpile_region(REGION_PREFIX)


def invalidate_cache():
    # Drop every cached entry stored under this module's region prefix.
    redis_util.force_redis_cache_invalidation(REGION_PREFIX)


# Auth token for the fullstory REST API (name-mangled to be module-private).
__FULLSTORY_AUTH_TOKEN = config_util.get('FULLSTORY_AUTH_TOKEN')

__ONE_HOUR_IN_SECONDS = 60 * 60

__FULLSTORY_SESSIONS_GET_API = 'https://www.fullstory.com/api/v1/sessions?uid={profile_name}&limit={limit}'
"""
Link to get the sessions for a given profile name.

Caller must provide these parameters:
- profile_name: the profile name of the fullstory user. corresponds to the fullstory 'uid'
- limit: the number of sessions to get for the given user

ref: https://help.fullstory.com/develop-rest/137382-rest-api-retrieving-a-list-of-sessions-for-a-given-user-after-the-fact
"""

__FULLSTORY_SESSIONS_LIMIT = 100000
from lib.traceback.traceback import Traceback
from common_util import (
    config_util,
)

# Slack channel names to which tracebacks are routed.
TRACEBACKS_CHANNEL_NAME = 'tracebacks'
TRACEBACKS_CHANNEL_NAME_ADWORDS = 'tracebacks-adwords'
TRACEBACKS_CHANNEL_NAME_SOCIAL = 'tracebacks-social'

# Incoming-webhook URLs, one per channel above.
SLACK_WEBHOOK_TRACEBACKS = config_util.get('SLACK_WEBHOOK_TRACEBACKS')
SLACK_WEBHOOK_TRACEBACKS_ADWORDS = config_util.get('SLACK_WEBHOOK_TRACEBACKS_ADWORDS')
SLACK_WEBHOOK_TRACEBACKS_SOCIAL = config_util.get('SLACK_WEBHOOK_TRACEBACKS_SOCIAL')


def get_webhook_url(traceback: Traceback):
    """
    Given a traceback, returns the Slack App webhook to post to the appropriate slack channel.
    """
    channel_name = get_channel_name(traceback)
    if channel_name == TRACEBACKS_CHANNEL_NAME:
        return SLACK_WEBHOOK_TRACEBACKS
    elif channel_name == TRACEBACKS_CHANNEL_NAME_ADWORDS:
        return SLACK_WEBHOOK_TRACEBACKS_ADWORDS
    elif channel_name == TRACEBACKS_CHANNEL_NAME_SOCIAL:
        return SLACK_WEBHOOK_TRACEBACKS_SOCIAL
    else:
        # NOTE(review): `assert` statements are stripped under `python -O`,
        # which would make this branch silently return None — consider raising
        # an explicit exception instead.
        assert False, 'unknown channel %s' % channel_name
        return None


def get_channel_name(traceback: Traceback):
import os
import subprocess
import tempfile
import time

import pytest
import requests

from common_util import config_util

BADCORP_PAPERTRAIL_API_KEY = config_util.get('BADCORP_PAPERTRAIL_API_KEY')
# fail fast at import time if the key is not configured
assert BADCORP_PAPERTRAIL_API_KEY

# repository root, derived from this test file's location (two levels up)
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MAKEFILE_PATH = os.path.join(ROOT_DIR, 'Makefile')
PAPERTRAIL_API_CONFIG = os.path.join(ROOT_DIR, '.papertrail.yml')


def test_papertrail_to_elasticsearch_integration(setup_server_daemon):
    # run our Badcorp, saving tracebacks to papertrail
    # TODO: do we want to pass a uuid here and make sure we receive it on the other side? OR we
    # could use the box ID from the docker machine
    subprocess.check_call('make run-badcorp', cwd=ROOT_DIR, shell=True)

    # run the papertrail-cli and confirm that we see the Badcorp traceback
    with tempfile.NamedTemporaryFile(mode='w') as papertrail_creds_file:
        # papertrail-cli reads its token from a yaml credentials file
        api_key_string = 'token: {}'.format(BADCORP_PAPERTRAIL_API_KEY)
        papertrail_creds_file.write(api_key_string)
        papertrail_creds_file.flush()
        res = subprocess.check_output('papertrail -c {}'.format(
            papertrail_creds_file.name),
    Optional,
)

import jira

from lib.jira import (
    jira_issue_db,
)
from lib.jira.jira_issue import JiraIssue
from lib.traceback import (
    traceback_formatter,
)
from common_util import (
    config_util,
)

logger = logging.getLogger()

# Jira connection settings (server address + basic-auth credentials).
JIRA_SERVER = config_util.get('JIRA_SERVER')
JIRA_BASIC_AUTH_USERNAME = config_util.get('JIRA_BASIC_AUTH_USERNAME')
JIRA_BASIC_AUTH_PASSWORD = config_util.get('JIRA_BASIC_AUTH_PASSWORD')
JIRA_PROJECT_KEY = config_util.get('JIRA_PROJECT_KEY')

# Default issue assignees per product area.
JIRA_ASSIGNEE_ADWORDS = config_util.get('JIRA_ASSIGNEE_ADWORDS')
JIRA_ASSIGNEE_BING = config_util.get('JIRA_ASSIGNEE_BING')
JIRA_ASSIGNEE_SOCIAL = config_util.get('JIRA_ASSIGNEE_SOCIAL')
JIRA_ASSIGNEE_GRADER = config_util.get('JIRA_ASSIGNEE_GRADER')

DESCRIPTION_TEMPLATE = '''Error observed in production.

{noformat}
%s
{noformat}
from lib.jira import (
    jira_issue_aservice,
    jira_issue_db,
)
from lib.papertrail import (
    realtime_updater,
)
from lib.parser import (
    s3,
)
from lib.slack import (
    slack_poster,
)
from lib.traceback import (
    traceback_db,
)
from webapp import (
    api_aservice,
)

# NOTE(review): config_util, celery, Elasticsearch/certifi, redis and logging
# are referenced below but not imported in this chunk — presumably imported
# above; confirm against the full file.
REDIS_ADDRESS = config_util.get('REDIS_ADDRESS')
ES_ADDRESS = config_util.get('ES_ADDRESS')

app = celery.Celery('tasks', broker='redis://' + REDIS_ADDRESS)

# set up database
ES = Elasticsearch([ES_ADDRESS], ca_certs=certifi.where())
REDIS = redis.StrictRedis(host=REDIS_ADDRESS)

logger = logging.getLogger()


@app.task
def update_jira_issue(issue_key, do_invalidate_cache):
    """
        update a jira issue in our database, given its key
""" import logging import tempfile import boto3 import botocore from common_util import ( config_util, retry, ) from lib.papertrail import file_parser logger = logging.getLogger() AWS_REGION = config_util.get('AWS_REGION') AWS_ACCESS_KEY_ID = config_util.get('AWS_ACCESS_KEY_ID') AWS_SECRET_ACCESS_KEY = config_util.get('AWS_SECRET_ACCESS_KEY') @retry.Retry(exceptions=(EOFError, )) def parse_s3_file(bucket, key): """ Downloads the file given described by the params and parses it. Returns a list of L{Traceback}s and a list of L{ApiCall}. Returns None, None on error. """ s3 = boto3.client( 's3', region_name=AWS_REGION, aws_access_key_id=AWS_ACCESS_KEY_ID,
import logging
import json

import requests

from common_util import (
    config_util,
)
from lib.jira import (
    jira_issue_aservice,
)
from lib.jira.jira_issue import JiraIssue
from lib.slack import slack_channel
from lib.traceback import (
    traceback_formatter,
)
from lib.traceback.traceback import Traceback

# Token for posting to Slack as a real user (not a bot integration).
SLACK_REAL_USER_TOKEN = config_util.get('SLACK_REAL_USER_TOKEN')

logger = logging.getLogger()

MESSAGE_TEMPLATE = """
```
{traceback_text}```"""

JIRA_ISSUE_TEMPLATE = """ - <{issue_link}|{issue_key}>, {issue_status}, {issue_assignee}: {issue_summary}
"""
"""
a template for rendering a single jira issue in slack

requires:
- issue_link: a url link to this issue
def get_db() -> Elasticsearch:
    """Build and return a new Elasticsearch client.

    The server address comes from the ES_ADDRESS config value; TLS root
    certificates come from the certifi CA bundle. A fresh client is
    constructed on every call.
    """
    address = config_util.get('ES_ADDRESS')
    return Elasticsearch([address], ca_certs=certifi.where())
import logging

import dogpile.cache
import redis

from common_util import config_util

# When falsy, make_dogpile_region returns a no-op (null-backend) region.
USE_DOGPILE_CACHE = config_util.get('USE_DOGPILE_CACHE')
REDIS_ADDRESS = config_util.get('REDIS_ADDRESS')
# NOTE(review): re-reads REDIS_ADDRESS from config instead of reusing the
# constant above — same value, but consider using REDIS_ADDRESS directly.
REDIS = redis.StrictRedis(host=config_util.get('REDIS_ADDRESS'))

logger = logging.getLogger()


def make_dogpile_region(dogpile_region_prefix: str):
    """
    Create a dogpile cache region whose keys are namespaced by the given prefix.

    Returns a null-backend region (caching disabled) when USE_DOGPILE_CACHE is
    falsy; otherwise a redis-backed region with a 15-minute expiration.
    """
    if not USE_DOGPILE_CACHE:
        dogpile_region = dogpile.cache.make_region().configure(
            'dogpile.cache.null')
        logger.info("dogpile cache turned off")
        return dogpile_region

    # Prefix every key with the region namespace, then sha1-mangle the
    # original key (keys must be encoded to bytes before hashing).
    key_mangler_func = lambda key: ("%s:%s" % (dogpile_region_prefix,
                                               dogpile.cache.util.sha1_mangle_key(
                                                   key.encode('utf-8'))))
    dogpile_region = dogpile.cache.make_region(
        key_mangler=key_mangler_func).configure(
            'dogpile.cache.redis',
            expiration_time=60 * 15,  # 15 minutes
            arguments={
from typing import Optional, Callable, Set
import logging

from common_util import (
    config_util,
)
from lib.fullstory import fullstory
from lib.traceback.traceback import Traceback

logger = logging.getLogger()

KIBANA_REDIRECT_URL = config_util.get('KIBANA_REDIRECT_URL')
PRODUCT_URL = config_util.get('PRODUCT_URL')

TIMESTAMP_TEMPLATE = '%b %d %Y %H:%M:%S'
"""
A template for human-readable timestamps. Timezone info is ignored.

To be used with a datetime object like this:
`dt.strftime(TIMESTAMP_TEMPLATE)`
"""

PAPERTRAIL_LINK_JIRA_TEMPLATE = "[{{{{{timestamp}}}}}|{kibana_redirect_url}/api/traceback/{papertrail_id}]"
PAPERTRAIL_LINK_SLACK_TEMPLATE = "<{kibana_redirect_url}/api/traceback/{papertrail_id}|{timestamp}>"
"""
A template for a link to papertrail, with the timestamp as the human-readable string.

Instead of linking directly to papertrail, we include a redirect service. This lets us dynamically
link to the Elasticsearch archive after the papertrail link has been recycled.

Caller must provide:
- timestamp, from TIMESTAMP_TEMPLATE
- a link to a service that redirects to kibana. example: 'https://kibana-redirect.company.com'
from opentracing_instrumentation.client_hooks import install_all_patches
import jaeger_client
from jaeger_client.metrics.prometheus import PrometheusMetricsFactory

from common_util import config_util

JAEGER_REPORTING_HOST = config_util.get('JAEGER_REPORTING_HOST')


def initialize_tracer():
    """Patch common client libraries for tracing and start the Jaeger tracer.

    Configures a constant sampler (param 1) with spans reported to
    JAEGER_REPORTING_HOST and Prometheus-backed tracer metrics, then
    returns the initialized tracer.
    """
    install_all_patches()

    tracer_settings = {
        'sampler': {
            'type': 'const',
            'param': 1,
        },
        'logging': True,
        'local_agent': {
            'reporting_host': JAEGER_REPORTING_HOST,
        },
    }
    jaeger_config = jaeger_client.Config(
        config=tracer_settings,
        service_name='tracebacks',
        validate=True,
        metrics_factory=PrometheusMetricsFactory(namespace='tracebacks'),
    )
    return jaeger_config.initialize_tracer()  # also sets opentracing.tracer