def wrapped_f(*args, **kwargs):
    """
    Execute the wrapped function inside an Elastic APM transaction when APM
    is enabled for the active Flask app (or the scheduler's app).

    Returns whatever the wrapped function returns; re-raises its exceptions
    after recording them with APM.
    """
    config = None
    result = None
    # fix: previously apm_enabled (and config) were left undefined when
    # neither current_app nor sched.app was available -> UnboundLocalError.
    apm_enabled = False
    if current_app:
        config = current_app.config['ELASTIC_APM']
        apm_enabled = str(current_app.config['ELASTIC_ENABLED']) == '1'
    elif sched.app:
        flask_app = sched.app.app_context().app
        config = flask_app.config['ELASTIC_APM']
        apm_enabled = str(flask_app.config['ELASTIC_ENABLED']) == '1'
    _name = name if name is not None else func.__name__
    if not apm_enabled:
        return func(*args, **kwargs)
    client = Client(config)
    if not client:
        print(
            f'could not create ElasticAPM client... running <{_name}> without APM'
        )
        return func(*args, **kwargs)
    client.begin_transaction('registered_funcs')
    try:
        result = func(*args, **kwargs)
    except Exception:
        client.capture_exception()
        client.end_transaction(f'{_name} - error')
        raise  # fix: bare raise preserves the original traceback (raise e did not)
    client.end_transaction(f'{_name} - success')
    return result
def setup_apm(apm_config):
    """
    Build an ``elasticapm.Client`` from an APMConfig object.

    :param apm_config: The apm configuration class
    :type apm_config: APMConfig
    :return: The apm client
    :rtype: elasticapm.Client
    """
    # Mandatory settings. Allowed APP_NAME chars: a-z, A-Z, 0-9, -, _ and
    # space (per elasticapm.contrib.flask).
    cdict = {
        'APP_NAME': apm_config.app_name,
        'SECRET_TOKEN': apm_config.secret_token,
        'DEBUG': apm_config.debug,
    }
    # Optional settings are only added when they are truthy on the config.
    optional_settings = (
        ('TRANSACTIONS_IGNORE_PATTERNS', apm_config.ignore_patterns),
        ('SERVER_URL', apm_config.server_url),
        ('MAX_QUEUE_SIZE', apm_config.queue_size),
    )
    for key, value in optional_settings:
        if value:
            cdict[key] = value
    return Client(cdict)
def main(profile: str):
    """
    Celery worker main entry point

    Args:
        profile: profile used to run the app
    """
    load_config(profile, CONFIGS_PATH, config, 'NLP_SERVICE')
    initialize_summary_service()
    load()
    # Fail fast if the queue provider is unreachable.
    exchange_publisher = container.get('exchange_publisher')
    if not exchange_publisher.test_connection():
        LOGGER.error('Error connecting to the queue provider. Exiting...')
        sys.exit(1)
    add_logstash_handler(LOG_CONFIG, config.logstash.host, config.logstash.port)
    CELERY_APP.configure(task_queue_name='nlp-worker',
                         broker_config=config.rabbit,
                         worker_concurrency=config.celery.concurrency,
                         result_backend_url=build_redis_url(**config.redis))
    apm_settings = {
        'SERVICE_NAME': config.elastic_apm.service_name,
        'SECRET_TOKEN': config.elastic_apm.secret_token,
        'SERVER_URL': config.elastic_apm.url,
    }
    apm = Client(config=apm_settings)
    register_instrumentation(apm)
    register_exception_tracking(apm)
    CELERY_APP.run()
def main(profile: str):
    """
    Celery app main entry point

    Args:
        profile: profile used to run the app
    """
    load_config(profile, CONFIGS_PATH, config, 'NEWS_DISCOVERY')
    load()
    # Fail fast if the queue provider is unreachable.
    exchange_publisher = container.get('exchange_publisher')
    if not exchange_publisher.test_connection():
        LOGGER.error('Error connecting to the queue provider. Exiting...')
        sys.exit(1)
    add_logstash_handler(LOG_CONFIG, config.logstash.host, config.logstash.port)
    CELERY_APP.configure(task_queue_name='news-discovery',
                         broker_config=config.rabbit,
                         worker_concurrency=config.celery.concurrency)
    apm_settings = {
        'SERVICE_NAME': 'news-discovery-app',
        'SECRET_TOKEN': config.elastic_apm.secret_token,
        'SERVER_URL': config.elastic_apm.url,
    }
    apm = Client(config=apm_settings)
    register_instrumentation(apm)
    register_exception_tracking(apm)
    CELERY_APP.run()
def __init__(self, app, client=None, **config):
    """
    Create the elasticapm Client object and store it on the app for later use.

    Configuration may come from the ``**config`` kwargs or from the Tornado
    application's settings under the ``"ELASTIC_APM"`` key (a dict); when both
    are present, the kwargs are merged into the settings dict and win.
    """
    if "ELASTIC_APM" in app.settings and isinstance(
            app.settings["ELASTIC_APM"], dict):
        # NOTE: settings is the dict stored in app.settings, so update()
        # mutates the application's ELASTIC_APM settings in place.
        settings = app.settings["ELASTIC_APM"]
        settings.update(config)
        config = settings
    if not client:
        # Tag the client with framework metadata unless the caller supplied one.
        config.setdefault("framework_name", "tornado")
        config.setdefault("framework_version", tornado.version)
        client = Client(config)
    self.app = app
    self.client = client
    # Expose the client on the app so handlers can reach it directly.
    app.elasticapm_client = client
    # Don't instrument if debug=True in tornado, unless client.config.debug is True
    if (not self.app.settings.get("debug")
            or client.config.debug) and client.config.instrument:
        elasticapm.instrument()
def __init__(self, app, client=None):
    """
    Attach an Elastic APM client to an aiohttp application.

    When no client is supplied, one is built from the app's ``ELASTIC_APM``
    settings dict, tagged with aiohttp framework metadata.
    """
    if not client:
        settings = app.get("ELASTIC_APM", {})
        settings.setdefault("framework_name", "aiohttp")
        settings.setdefault("framework_version", aiohttp.__version__)
        client = Client(config=settings)
    self.app = app
    self.client = client
    self.install_tracing(app, client)
def __init__(self, *args, **kwargs):
    """
    Construct the underlying APM client and verify connectivity by sending
    a ping message and flushing its transport.

    :raises ConnectionError: if the APM server cannot be reached.
    """
    self.args = args
    self.kwargs = kwargs
    self.client_ = APMClient(*args, **kwargs)
    try:
        self.client_.capture_message('ping')
        self.client_._transport.flush()
    except Exception as exc:
        # fix: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
        # chaining keeps the underlying transport error visible.
        self.client_.close()
        raise ConnectionError("Can't connect to APM-Server") from exc
def init_apm_client(self, apm_server_url, apm_service_name,
                    apm_verify_server_cert, apm_secret_token, apm_api_key):
    """
    Build an Elastic APM client when a server URL is configured.

    :return: the APM client, or None when no server URL is given.
    """
    if apm_server_url:
        return Client(service_name=apm_service_name,
                      server_url=apm_server_url,
                      # fix: this was hard-coded to False, silently ignoring
                      # the apm_verify_server_cert parameter
                      verify_server_cert=apm_verify_server_cert,
                      secret_token=apm_secret_token,
                      api_key=apm_api_key,
                      use_elastic_traceparent_header=True,
                      debug=True)
    return None
def init_apm_client(self):
    """
    Initializes the APM client.

    Prefers a secret token over an API key when both are configured; returns
    None when neither credential is present.

    :return: the APM client object, or None.
    """
    if self.args.apm_token:
        credentials = {'secret_token': self.args.apm_token}
    elif self.args.api_key:
        credentials = {'api_key': self.args.api_key}
    else:
        return None
    return Client(service_name=self.args.service_name,
                  server_url=self.args.apm_server_url,
                  verify_server_cert=False,
                  use_elastic_traceparent_header=True,
                  debug=True,
                  **credentials)
def wrapper(*args, **kwargs):
    """
    Run the wrapped function inside an Elastic APM transaction when the
    active Flask app (or the scheduler's app) has APM enabled.

    Returns the wrapped function's result; re-raises its exceptions after
    recording them with APM.
    """
    config = None
    result = None
    # fix: apm_enabled (and config) were undefined when neither current_app
    # nor sched.app was available -> UnboundLocalError below.
    apm_enabled = False
    if current_app:
        config = current_app.config['ELASTIC_APM']
        apm_enabled = current_app.config['ELASTIC_ENABLED']
    elif sched.app:
        flask_app = sched.app.app_context().app
        config = flask_app.config['ELASTIC_APM']
        apm_enabled = flask_app.config['ELASTIC_ENABLED']
    # fix: only build a client when APM is enabled; the original constructed
    # one unconditionally (even with config=None) and then ignored it.
    client = Client(config) if apm_enabled else None
    if client:
        client.begin_transaction('registered_funcs')
        try:
            result = func(*args, **kwargs)
        except Exception:
            client.capture_exception()
            client.end_transaction(f'{func.__name__} - error')
            raise  # bare raise keeps the original traceback
        client.end_transaction(f'{func.__name__} - success')
    else:
        print(f'Running <{func.__name__}> without APM')
        result = func(*args, **kwargs)
    return result
async def wrapper(*args, **kwargs):
    """
    Await the wrapped coroutine inside an APM transaction, ending it with
    ok_status on success or error_status (after logging) on failure.
    """
    # fix: setup used to live inside the try block, so a failure in
    # instrument()/Client() reached the except handler before `client`
    # existed, raising NameError instead of the real error.
    instrument()
    client = Client()
    # NOTE(review): a new LoggingHandler is added to the root logger on every
    # call — handlers accumulate across invocations; confirm if intended.
    handler = LoggingHandler()
    logger = logging.getLogger()
    logger.addHandler(handler)
    client.begin_transaction(tran_category)
    try:
        result = await func(*args, **kwargs)
    except Exception as e:
        logging.error(e, exc_info=True)
        client.end_transaction(tran_name, error_status)
        raise
    client.end_transaction(tran_name, ok_status)
    return result
class Client:
    """
    Fault-tolerant wrapper around ``elasticapm.Client`` (``APMClient``).

    The constructor verifies connectivity with a ping; afterwards the
    transaction helpers degrade to no-ops — closing the wrapped client on
    failure instead of propagating errors into the host application. The
    ``client`` property lazily rebuilds the wrapped client after a close.
    """

    def __init__(self, *args, **kwargs):
        # Keep construction arguments so the client can be rebuilt after a
        # close() triggered by a transport failure.
        self.args = args
        self.kwargs = kwargs
        self.client_ = APMClient(*args, **kwargs)
        try:
            self.client_.capture_message('ping')
            self.client_._transport.flush()
        except Exception as exc:
            # fix: bare `except:` also caught SystemExit/KeyboardInterrupt;
            # chaining preserves the underlying error.
            self.client_.close()
            raise ConnectionError("Can't connect to APM-Server") from exc

    @property
    def client(self):
        # Lazily recreate the wrapped client if it was closed.
        if self.client_ is None:
            self.client_ = APMClient(*self.args, **self.kwargs)
        return self.client_

    def close(self):
        """Close the wrapped client (best effort) and drop the reference."""
        try:
            self.client_.close()
        except Exception:
            # deliberate best-effort: the client may already be closed/broken
            pass
        self.client_ = None

    def end_transaction(self, name=None, result="", duration=None):
        """End the current transaction; on failure close the client, return None."""
        try:
            return self.client.end_transaction(name=name, result=result,
                                               duration=duration)
        except Exception:
            self.close()
            return None

    def begin_transaction(self, transaction_type, trace_parent=None, start=None):
        """Begin a transaction; on failure close the client, return None."""
        try:
            return self.client.begin_transaction(transaction_type,
                                                 trace_parent=trace_parent,
                                                 start=start)
        except Exception:
            self.close()
            return None

    def capture_exception(self, exc_info=None, handled=True, **kwargs):
        """Report an exception; on failure close the client, return None."""
        try:
            return self.client.capture_exception(exc_info=exc_info,
                                                 handled=handled, **kwargs)
        except Exception:
            self.close()
            return None

    def __getattr__(self, item):
        # Delegate everything else to the wrapped client (may recreate it).
        attr = getattr(self.client, item)
        return attr
def init_client(
    cls,
    # NOTE: these defaults are evaluated once, at function definition time —
    # changing the env vars afterwards has no effect on the defaults.
    service_name=os.environ.get("ELASTIC_APM_SERVICE_NAME", "kytos"),
    server_url=os.environ.get("ELASTIC_APM_URL", "http://localhost:8200"),
    secret_token=os.environ.get("ELASTIC_APM_SECRET_TOKEN",
                                "elasticapm_token"),
    **kwargs,
) -> Client:
    """Init APM Client.

    Lazily creates and caches a singleton Client (and a FlaskAPM wrapper) on
    the class; subsequent calls return the cached client and ignore any new
    arguments. An optional ``app`` kwarg is forwarded to FlaskAPM only.
    """
    app = kwargs.pop("app", None)
    if not cls._client:
        cls._client = Client(
            service_name=service_name,
            server_url=server_url,
            secret_token=secret_token,
            **kwargs,
        )
    if not cls._flask_apm:
        cls._flask_apm = FlaskAPM(client=cls._client, app=app)
    return cls._client
def main(profile: str):
    """
    Celery beat main entry point

    Args:
        profile: profile used to run the beat
    """
    load_config(profile, CONFIGS_PATH, config, 'NEWS_DISCOVERY')
    add_logstash_handler(LOG_CONFIG, config.logstash.host, config.logstash.port)
    CELERY_BEAT.configure(task_queue_name='news-discovery',
                          broker_config=config.rabbit)
    apm_settings = {
        'SERVICE_NAME': 'news-discovery-beat',
        'SECRET_TOKEN': config.elastic_apm.secret_token,
        'SERVER_URL': config.elastic_apm.url,
    }
    apm = Client(config=apm_settings)
    register_instrumentation(apm)
    register_exception_tracking(apm)
    CELERY_BEAT.run(beat=True)
def connect_to_apm_server(self):
    """
    Connect to the APM server (best effort).

    Derives the service name from the entry-point filename when none was
    configured, then tries to create the client; on failure only marks the
    server unreachable so the caller continues normally.

    :raises ValueError: if the service name cannot be derived.
    """
    try:
        if self.service_name is None:  # fix: `== None` -> identity check
            self.service_name = str(os.path.basename(
                __main__.__file__)).split(".")[0]
    except Exception as exc:  # fix: bare except swallowed SystemExit etc.
        raise ValueError(
            "file name string/service name is incorrect for elastic apm module"
        ) from exc
    try:
        self.client = Client({
            'SERVICE_NAME': self.service_name,
            "SERVER_URL": self.server_url
        })
    except Exception:
        # continue normally even if the APM server can't be connected;
        # the pointless `finally: pass` from the original was dropped
        self.apm_server_reachable = False
def wrapped_f(*args, **kwargs):
    """
    Execute the wrapped function inside an Elastic APM transaction when a
    client can be created from the active (or scheduler) Flask app's config.

    Returns the wrapped function's result; re-raises its exceptions after
    recording them with APM.
    """
    client = None
    if current_app:
        client = Client(current_app.config['ELASTIC_APM'])
    elif sched.app:
        client = Client(
            sched.app.app_context().app.config['ELASTIC_APM'])
    _name = name if name is not None else func.__name__
    # ensure client was created properly
    if client:
        client.begin_transaction('registered_funcs')
        try:
            result = func(*args, **kwargs)
        except Exception:
            client.capture_exception()
            client.end_transaction(f'{_name} - error')
            raise  # bare raise keeps the original traceback
        client.end_transaction(f'{_name} - success')
        return result  # fix: the original discarded the function's result
    print(
        f'could not create ElasticAPM client... running <{_name}> without APM'
    )
    return func(*args, **kwargs)  # fix: propagate the result here as well
output['channel'] client.capture_message('processed %s' % output['text']) return o return None, None if __name__ == "__main__": LOGGER = logging.getLogger(__name__) LOG_FORMAT = ('%(asctime)-15s %(levelname) -5s %(name) -5s %(funcName) ' '-3s %(lineno) -5d: %(message)s') logging.basicConfig(format=LOG_FORMAT, level=logging.INFO) client = Client({ 'SERVICE_NAME': 'archbot', 'DEBUG': False, 'SERVER_URL': 'http://apm-server-prd.apps.do-prd-okp-m0.do.viaa.be:80' }) handler = LoggingHandler(client=client) handler.setLevel(logging.WARN) logging.getLogger('elasticapm').setLevel('INFO') LOGGER.addHandler(handler) try: formatter = logging.Formatter('%(asctime)-15s %(levelname)-6s:' '%(message)s') logging.basicConfig(format=formatter, level=logging.INFO) except Exception: client.capture_exception() try:
def client(self):
    """Return the wrapped APM client, recreating it if it was closed."""
    cached = self.client_
    if cached is None:
        cached = APMClient(*self.args, **self.kwargs)
        self.client_ = cached
    return cached
from grpc_health.v1 import health_pb2 from grpc_health.v1 import health_pb2_grpc import demo_pb2 import demo_pb2_grpc logger = logging.getLogger('emailService') logHandler = logging.StreamHandler(stream=sys.stdout) logHandler.setFormatter(ecs_logging.StdlibFormatter()) logger.addHandler(logHandler) logger.setLevel(os.environ.get('LOG_LEVEL', 'INFO')) elasticapm.instrument() env = dict(os.environ) client = Client( { apm_key.replace('ELASTIC_APM_', ''): env[apm_key] for apm_key in env if apm_key.startswith('ELASTIC_APM') }, **{'SERVICE_NAME': 'emailService'}) # Loads confirmation email template from file env = Environment(loader=FileSystemLoader('templates'), autoescape=select_autoescape(['html', 'xml'])) template = env.get_template('confirmation.html') event_dataset = None class BaseEmailService(demo_pb2_grpc.EmailServiceServicer): def extract_trace_parent(self, context): trace_parent = None for key, value in context.invocation_metadata():
"span.id": elasticapm.get_span_id(), "trace.id": elasticapm.get_trace_id(), "event.dataset": event_dataset } return payload if __name__ == '__main__': defaults = {'SERVICE_NAME': 'RecommendationServer'} event_dataset = defaults['SERVICE_NAME'] + ".log" logger.info('initializing recommendationservice', extra=get_extra_logging_payload()) env = dict(os.environ) client = Client( { apm_key.replace('ELASTIC_APM_', ''): env[apm_key] for apm_key in env if apm_key.startswith('ELASTIC_APM') }, **defaults) port = os.environ.get('PORT', '8080') catalog_addr = os.environ.get('PRODUCT_CATALOG_SERVICE_ADDR', '') if catalog_addr == '': raise Exception( 'PRODUCT_CATALOG_SERVICE_ADDR environment variable not set') logger.info('product catalog address: ' + catalog_addr, extra=get_extra_logging_payload()) channel = grpc.insecure_channel(catalog_addr) product_catalog_stub = demo_pb2_grpc.ProductCatalogServiceStub(channel) # create gRPC server server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
import os from bottle import route, run, template, view, static_file from elasticapm.contrib.opentracing import Tracer from opentracing.propagation import Format from elasticapm import Client ENVIRONMENT = os.getenv('ENVIRONMENT', 'local') DEBUG = True if os.getenv('DEBUG', 'False').lower() == 'true' else False RELOADER = True if os.getenv('RELOADER', 'False').lower() == 'true' else False TRACER = Tracer(Client({'SERVICE_NAME': os.environ.get('APM_NAME')})) @route('/hello/static/<filename:path>') @route('/static/<filename:path>') def send_static(filename): return static_file(filename, root='./static') @route('/') @view('home') def index(): with TRACER.start_active_span("index", finish_on_close=True): return dict(environment=ENVIRONMENT) @route('/hello') @route('/hello/<name>') @view('hello') def hello(name='World'): with self._tracer.start_active_span("hello", finish_on_close=True):
# <SFTRACE-CONFIG> add the below agent specific configuration
from elasticapm import Client, instrument
from elasticapm.contrib.celery import register_exception_tracking, register_instrumentation
import os
import json

instrument()

# <SFTRACE-CONFIG> Replace <service_name> with appropriate value. The service_name
# is used to identify and filter the traces related to an application and should be
# named appropriately to distinctly identify it. Service name must only contain
# characters from the ASCII alphabet, numbers, dashes, underscores and spaces.
try:
    # fix: the sftrace helper was previously executed twice (once for the
    # value, once for the length check) — run it once and reuse the output.
    sftrace_output = os.popen('/opt/sfagent/sftrace/sftrace').readlines()
    SFTRACE_CONFIG = json.loads(sftrace_output[0]) if sftrace_output else dict()
    apm_client = Client(
        service_name='<service_name>',
        server_url=SFTRACE_CONFIG.get('SFTRACE_SERVER_URL'),
        global_labels=SFTRACE_CONFIG.get('SFTRACE_GLOBAL_LABELS'),
        verify_server_cert=SFTRACE_CONFIG.get('SFTRACE_VERIFY_SERVER_CERT'))
    register_exception_tracking(apm_client)
    register_instrumentation(apm_client)
except Exception as error:
    print("Error while fetching snappyflow tracing configurations", error)
# sfagent config finish

app = Celery('tasks', broker='amqp://guest@localhost:5672')


@app.task
def add(x, y):
    """Demo task: return the sum of x and y."""
    return x + y
from celery import Celery
# <SFTRACE-CONFIG> add the below agent specific configuration
from elasticapm import Client, instrument
from elasticapm.contrib.celery import register_exception_tracking, register_instrumentation
import os

instrument()

# <SFTRACE-CONFIG> Replace <service_name> with appropriate value. The service_name
# is used to identify and filter the traces related to an application and should be
# named appropriately to distinctly identify it. Service name must only contain
# characters from the ASCII alphabet, numbers, dashes, underscores and spaces.
apm_client = Client(
    server_url=os.getenv('SFTRACE_SERVER_URL', None),
    global_labels=os.getenv('SFTRACE_GLOBAL_LABELS', None),
    service_name='<service_name>',
    # fix: honour the correctly spelled variable first; fall back to the
    # historical misspelling (VERFIY) for backward compatibility
    verify_server_cert=os.getenv(
        'SFTRACE_VERIFY_SERVER_CERT',
        os.getenv('SFTRACE_VERFIY_SERVER_CERT', None)))
register_exception_tracking(apm_client)
register_instrumentation(apm_client)
# sfagent config finish

app = Celery('tasks', broker='amqp://guest@localhost:5672')


@app.task
def add(x, y):
    """Demo task: return the sum of x and y."""
    return x + y
from domain.delivery import Delivery
from whatsapp.whatsapp_cli_interface import send_whatsapp
from alert_manager import AlertManager
from whatsapp.whatsapp_process import WhatsAppProcess
from celery.signals import task_postrun
from celery import group
from celery.exceptions import SoftTimeLimitExceeded
from celery.exceptions import MaxRetriesExceededError
from elasticapm import Client
from task_queue.celery_config import *

# Elastic APM client configured from the environment. Both variables must be
# set — os.environ[...] raises KeyError at import time otherwise.
# NOTE(review): `os`, `FileManager`, `log_manager` and `app` are not imported
# here; presumably they come from the wildcard celery_config import — confirm.
client = Client({
    'SERVICE_NAME': os.environ['ELASTIC_APM_SERVICE_NAME'],
    'SERVER_URL': os.environ['ELASTIC_APM_SERVER_URL']
})

# Shared module-level collaborators for the task-queue helpers below.
file_manager = FileManager()
alert_manager = AlertManager()
logger = log_manager.get_logger('session_manager')


def purge_tasks():
    """
    Clear all tasks from queues

    :returns number of tasks purged
    """
    return app.control.purge()
import time import redis import os import json import requests from py_zipkin.zipkin import zipkin_span, ZipkinAttrs, generate_random_64bit_string import time import random import elasticapm from elasticapm import Client client = Client({'SERVICE_NAME': 'python'}) @elasticapm.capture_span() def log_message(message): time_delay = random.randrange(0, 2000) time.sleep(time_delay / 1000) print('message received after waiting for {}ms: {}'.format( time_delay, message)) if __name__ == '__main__': redis_host = os.environ['REDIS_HOST'] redis_port = int(os.environ['REDIS_PORT']) redis_channel = os.environ['REDIS_CHANNEL'] zipkin_url = os.environ['ZIPKIN_URL'] if 'ZIPKIN_URL' in os.environ else '' def http_transport(encoded_span): requests.post(
from botocore.exceptions import ClientError rds_host = "rds-instance-endpoint" name = "db_username" password = "******" db_name = "db_name" recepient_email = "*****@*****.**" sender_email = "*****@*****.**" logger = logging.getLogger() logger.setLevel(logging.INFO) client = Client( { 'SERVER_URL': 'https://xxxxxxxxxxxxxxxxxxxxxxxx.apm.us-east-1.aws.cloud.es.io:443', 'SERVICE_NAME': 'feedback-form', 'ENVIRONMENT': 'prod', 'SECRET_TOKEN': 'xxxxxxxxxxxxxxxxxxxxxxx' } ) client.begin_transaction('request') def lambda_handler(event, context): try: validate(event=event, schema=schemas.INPUT, envelope="queryStringParameters") except SchemaValidationError: client.capture_exception() return { "statusCode": 400, "body": json.dumps({"message": "Bad Request"}), } except:
import time import redis import os import json import requests import time import random import elasticapm from elasticapm.utils.disttracing import TraceParent from elasticapm import Client client = Client({'SERVICE_NAME': 'python'}) @elasticapm.capture_span() def log_message(message): time_delay = random.randrange(0, 2000) time.sleep(time_delay / 1000) print('message received after waiting for {}ms: {}'.format(time_delay, message)) if __name__ == '__main__': redis_host = os.environ['REDIS_HOST'] redis_port = int(os.environ['REDIS_PORT']) redis_channel = os.environ['REDIS_CHANNEL'] pubsub = redis.Redis(host=redis_host, port=redis_port, db=0).pubsub() pubsub.subscribe([redis_channel]) for item in pubsub.listen(): try: