def __init__(self, datastore=None, redis=None, redis_persist=None, logger=None):
    super().__init__('assemblyline.dispatcher.file', logger)

    config: Config = forge.get_config()
    datastore: AssemblylineDatastore = datastore or forge.get_datastore(config)

    self.dispatcher = Dispatcher(redis=redis, redis_persist=redis_persist,
                                 datastore=datastore, logger=self.log)

    if config.core.metrics.apm_server.server_url is not None:
        self.log.info(f"Exporting application metrics to: {config.core.metrics.apm_server.server_url}")
        elasticapm.instrument()
        self.apm_client = elasticapm.Client(server_url=config.core.metrics.apm_server.server_url,
                                            service_name="dispatcher")
    else:
        self.apm_client = None
def __init__(self, logger=None, datastore=None, redis=None, persistent_redis=None):
    super().__init__('assemblyline.ingester.internals', logger=logger)
    config = forge.get_config()

    # Connect to all sorts of things
    datastore = datastore or forge.get_datastore(config)
    classification_engine = forge.get_classification()

    # Initialize the ingester specific resources
    self.ingester = Ingester(datastore=datastore, classification=classification_engine,
                             logger=self.log, redis=redis, persistent_redis=persistent_redis)

    if config.core.metrics.apm_server.server_url is not None:
        self.log.info(f"Exporting application metrics to: {config.core.metrics.apm_server.server_url}")
        elasticapm.instrument()
        self.apm_client = elasticapm.Client(server_url=config.core.metrics.apm_server.server_url,
                                            service_name="ingester")
    else:
        self.apm_client = None
def test_elasticapm_structlog_log_correlation_ecs_fields(spec_validator):
    apm = elasticapm.Client({"SERVICE_NAME": "apm-service", "DISABLE_SEND": True})
    stream = StringIO()
    logger = structlog.PrintLogger(stream)
    logger = structlog.wrap_logger(
        logger, processors=[structlog_processor, ecs_logging.StructlogFormatter()]
    )
    log = logger.new()
    apm.begin_transaction("test-transaction")
    try:
        with elasticapm.capture_span("test-span"):
            span_id = elasticapm.get_span_id()
            trace_id = elasticapm.get_trace_id()
            transaction_id = elasticapm.get_transaction_id()
            log.info("test message")
    finally:
        apm.end_transaction("test-transaction")

    ecs = json.loads(spec_validator(stream.getvalue().rstrip()))
    ecs.pop("@timestamp")
    assert ecs == {
        "ecs": {"version": "1.6.0"},
        "log.level": "info",
        "message": "test message",
        "span": {"id": span_id},
        "trace": {"id": trace_id},
        "transaction": {"id": transaction_id},
        "service": {"name": "apm-service"},
    }
def __init__(self, config=None):
    super().__init__('assemblyline.metrics_aggregator', shutdown_timeout=65)
    self.config = config or forge.get_config()
    self.elastic_hosts = self.config.core.metrics.elasticsearch.hosts
    self.is_datastream = False

    if not self.elastic_hosts:
        self.log.error("No elasticsearch cluster defined to store metrics. All gathered stats will be ignored...")
        sys.exit(1)

    self.scheduler = BackgroundScheduler(daemon=True)
    self.metrics_queue = None
    self.es = None
    self.counters_lock = Lock()
    self.counters = {}

    if self.config.core.metrics.apm_server.server_url is not None:
        self.log.info(f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}")
        elasticapm.instrument()
        self.apm_client = elasticapm.Client(server_url=self.config.core.metrics.apm_server.server_url,
                                            service_name="metrics_aggregator")
    else:
        self.apm_client = None
def init_apm_client():
    global apm_server_url, apm_token, apm_api_key, apm_service_name, apm_custom_context, apm_parent_id
    LOGGER.debug("init_apm_client")
    if apm_server_url:
        if not (apm_token or apm_api_key) or not apm_service_name:
            # pytest.fail() raises internally, aborting the test run here.
            pytest.fail("""
            APM server URL, APM service name, and a token or API key are required to connect to the APM service.
            --apm-server-url https://apm.example.com:8200 --apm-token a51bfe6c --apm-service-name my_service
            or
            --apm-server-url https://apm.example.com:8200 --apm-api-key 3398579f385ea51bfe6cb2183546931d --apm-service-name my_service
            """)  # noqa: E501
        apm_client_local = None
        if apm_token:
            apm_client_local = elasticapm.Client(
                service_name=apm_service_name,
                server_url=apm_server_url,
                verify_server_cert=False,
                secret_token=apm_token,
                use_elastic_traceparent_header=True,
                debug=True)
        elif apm_api_key:
            apm_client_local = elasticapm.Client(
                service_name=apm_service_name,
                server_url=apm_server_url,
                verify_server_cert=False,
                api_key=apm_api_key,
                use_elastic_traceparent_header=True,
                debug=True)
        return apm_client_local
def __init__(self, redis=None, redis_persist=None):
    super().__init__('assemblyline.watcher', redis=redis, redis_persist=redis_persist)

    # Watcher structures
    self.hash = ExpiringHash(name=WATCHER_HASH, ttl=MAX_TIMEOUT, host=self.redis_persist)
    self.queue = UniquePriorityQueue(WATCHER_QUEUE, self.redis_persist)

    # Task management structures
    self.running_tasks = ExpiringHash(DISPATCH_RUNNING_TASK_HASH, host=self.redis)  # TODO: move to persistent?
    self.scaler_timeout_queue = NamedQueue(SCALER_TIMEOUT_QUEUE, host=self.redis_persist)

    # Metrics tracking
    self.counter = MetricsFactory(metrics_type='watcher', schema=Metrics, name='watcher',
                                  redis=self.redis, config=self.config)

    if self.config.core.metrics.apm_server.server_url is not None:
        self.log.info(f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}")
        elasticapm.instrument()
        self.apm_client = elasticapm.Client(server_url=self.config.core.metrics.apm_server.server_url,
                                            service_name="watcher")
    else:
        self.apm_client = None
def __init__(self, service_name='falcon_apm', server_url='http://localhost:8200'):
    self.hostname = socket.gethostname()
    self.client = elasticapm.Client(service_name=service_name, server_url=server_url)
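# A usage sketch, not part of the snippet above: the __init__ shown there
# suggests a Falcon middleware, so a hypothetical complete class (the name
# FalconApmMiddleware and both hook methods are assumptions) might bracket
# each request in an APM transaction like this.
import socket

import elasticapm


class FalconApmMiddleware:
    def __init__(self, service_name='falcon_apm', server_url='http://localhost:8200'):
        self.hostname = socket.gethostname()
        self.client = elasticapm.Client(service_name=service_name, server_url=server_url)

    def process_request(self, req, resp):
        # Open a transaction as soon as the request enters the middleware chain.
        self.client.begin_transaction('request')

    def process_response(self, req, resp, resource, req_succeeded):
        # Name the transaction after the route and record the response status.
        self.client.end_transaction('{} {}'.format(req.method, req.path), resp.status)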
def run():
    metadata = {}
    if "COMMIT_TIMESTAMP" in os.environ:
        metadata["timestamp"] = os.environ.get("COMMIT_TIMESTAMP")
        metadata["revision"] = os.environ.get("COMMIT_SHA")
        metadata["commit_message"] = os.environ.get("COMMIT_MESSAGE").split("\n")[0]
    runner = pyperf.Runner(metadata=metadata)
    pattern = os.environ.get("BENCH_PATTERN")
    args = runner.parse_args()
    if args.tracemalloc:
        bench_type = "tracemalloc"
    elif args.track_memory:
        bench_type = "trackmem"
    else:
        bench_type = "time"
    for func in discover_benchmarks():
        name = "%s.%s.%s" % (str(func.__module__), func.__name__, bench_type)
        if not pattern or fnmatch.fnmatch(name, pattern):
            client = None
            if hasattr(func, "client_defaults"):
                # create the client outside of the benchmarked function
                client = elasticapm.Client(**func.client_defaults)
                func = functools.partial(func, client=client)
            if args.tracemalloc:
                tracemalloc.clear_traces()
            runner.bench_func(name, func)
            if client:
                client.close()
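# A sketch under assumption: discover_benchmarks() above appears to yield plain
# functions, with `client_defaults` set as a function attribute when the
# benchmark wants a pre-built client. A hypothetical benchmark opting in could
# look like this (the function name and defaults are illustrative only).
def bench_capture_message(client):
    # The benchmarked body: report a single message through the shared client.
    client.capture_message('benchmark message')


bench_capture_message.client_defaults = {'service_name': 'bench', 'disable_send': True}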
def __init__(self, config=None):
    super().__init__('assemblyline.es_metrics', shutdown_timeout=15)
    self.config = config or forge.get_config()
    self.target_hosts = self.config.core.metrics.elasticsearch.hosts

    self.index_interval = 10.0
    self.old_node_data = {}
    self.old_cluster_data = {}
    self.old_index_data = {}
    self.old_index_time = 0.0
    self.old_node_time = 0.0
    self.old_cluster_time = 0.0

    if not self.target_hosts:
        self.log.error("No elasticsearch cluster defined to store metrics. All gathered stats will be ignored...")
        sys.exit(1)

    self.input_es = None
    self.target_es = None
    self.is_datastream = False

    if self.config.core.metrics.apm_server.server_url is not None:
        self.log.info(f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}")
        elasticapm.instrument()
        self.apm_client = elasticapm.Client(server_url=self.config.core.metrics.apm_server.server_url,
                                            service_name="es_metrics")
    else:
        self.apm_client = None
def __init__(self, handler, registry):
    self.handler = handler
    self.registry = registry
    settings = registry.settings
    config = {
        'SERVICE_NAME': settings['elasticapm.service_name'],
        'SERVER_URL': settings['elasticapm.server_url'],
        'SECRET_TOKEN': settings['elasticapm.secret_token'],
        'ENVIRONMENT': settings['elasticapm.environment'],
    }
    if settings.get('elasticapm.transactions_ignore_patterns', ''):
        config['TRANSACTIONS_IGNORE_PATTERNS'] = settings['elasticapm.transactions_ignore_patterns'].split()
    pkg_versions = dict()
    for pkg_name in (
            'pyramid',
            'pyramid_elasticapm',
            settings['elasticapm.service_distribution'],
    ):
        pkg_versions[pkg_name] = pkg_resources.get_distribution(pkg_name).version
    self.client = elasticapm.Client(
        config,
        service_version=pkg_versions[settings['elasticapm.service_distribution']],
        framework_name='Pyramid',
        framework_version=pkg_versions['pyramid'],
        global_labels={'pyramid_elasticapm': pkg_versions['pyramid_elasticapm']},
    )
def __init__(self, config=None):
    super().__init__('assemblyline.heartbeat_manager')
    self.config = config or forge.get_config()
    self.datastore = forge.get_datastore()
    self.metrics_queue = CommsQueue(METRICS_QUEUE)
    self.scheduler = BackgroundScheduler(daemon=True)
    self.hm = HeartbeatFormatter("heartbeat_manager", self.log, config=self.config)

    self.counters_lock = Lock()
    self.counters = {}
    self.rolling_window = {}
    self.window_ttl = {}
    self.ttl = self.config.core.metrics.export_interval * 2
    self.window_size = int(60 / self.config.core.metrics.export_interval)
    if self.window_size != 60 / self.config.core.metrics.export_interval:
        self.log.warning("Cannot calculate a proper window size for reporting heartbeats. "
                         "Metrics reported during heartbeat will be wrong.")

    if self.config.core.metrics.apm_server.server_url is not None:
        self.log.info(f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}")
        elasticapm.instrument()
        self.apm_client = elasticapm.Client(server_url=self.config.core.metrics.apm_server.server_url,
                                            service_name="heartbeat_manager")
    else:
        self.apm_client = None
def __init__(self, handler, registry):
    self.handler = handler
    self.registry = registry
    self.client = elasticapm.Client(
        framework_name="Pyramid",
        framework_version=pkg_resources.get_distribution("pyramid").version)
def test_elastic_apm_stdlib_with_filter_log_correlation_ecs_fields():
    apm = elasticapm.Client({"SERVICE_NAME": "apm-service", "DISABLE_SEND": True})
    stream = StringIO()
    logger = logging.getLogger("apm-logger")
    handler = logging.StreamHandler(stream)
    handler.setFormatter(
        ecs_logging.StdlibFormatter(
            exclude_fields=["@timestamp", "process", "log.origin.file.line"]))
    handler.addFilter(LoggingFilter())
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    apm.begin_transaction("test-transaction")
    try:
        with elasticapm.capture_span("test-span"):
            span_id = elasticapm.get_span_id()
            trace_id = elasticapm.get_trace_id()
            transaction_id = elasticapm.get_transaction_id()
            logger.info("test message")
    finally:
        apm.end_transaction("test-transaction")

    ecs = json.loads(stream.getvalue().rstrip())
    assert ecs == {
        "ecs": {"version": "1.5.0"},
        "log": {
            "level": "info",
            "logger": "apm-logger",
            "origin": {
                "file": {"name": "test_apm.py"},
                "function": "test_elastic_apm_stdlib_with_filter_log_correlation_ecs_fields",
            },
            "original": "test message",
        },
        "message": "test message",
        "span": {"id": span_id},
        "trace": {"id": trace_id},
        "transaction": {"id": transaction_id},
    }
def __init__(self, pid=None, **kwargs):
    super().__init__(**kwargs)
    self.pid = pid
    self.prefix_cache = {}
    self.command_list = Command.__subclasses__()
    self.log_channel = None
    self.error_channel = None
    if APM_SERVICE:
        self.apm = elasticapm.Client({'SERVICE_NAME': APM_SERVICE})
def __init__(self, client_instance=None, config=None, scope_manager=None):
    self._agent = client_instance or get_client() or elasticapm.Client(config=config)
    if scope_manager and not isinstance(scope_manager, ThreadLocalScopeManager):
        warnings.warn(
            "Currently, the Elastic APM opentracing bridge only supports the ThreadLocalScopeManager. "
            "Usage of other scope managers will lead to unpredictable results."
        )
    self._scope_manager = scope_manager or ThreadLocalScopeManager()
    if self._agent.config.instrument and self._agent.config.enabled:
        instrument()
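# A usage sketch under the assumption that the __init__ above is the Tracer
# from elasticapm.contrib.opentracing: once the bridge is constructed, spans
# can be driven through the standard OpenTracing API, and tags set this way
# should be carried over into the APM data.
from elasticapm.contrib.opentracing import Tracer

tracer = Tracer(config={'SERVICE_NAME': 'ot-bridge-demo', 'DISABLE_SEND': True})
with tracer.start_active_span('demo-operation') as scope:
    # Annotate the active span through the OpenTracing interface.
    scope.span.set_tag('component', 'demo')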
def apm():
    if sys.version_info < (3, 6):
        pytest.skip("elasticapm only supports python 3.6+")
    if sys.version_info[0] >= 3:
        record_factory = logging.getLogRecordFactory()
    apm = elasticapm.Client({"SERVICE_NAME": "apm-service", "DISABLE_SEND": True})
    yield apm
    apm.close()
    if sys.version_info[0] >= 3:
        logging.setLogRecordFactory(record_factory)
def __init__(self, config=None):
    super().__init__('assemblyline.statistics_aggregator')
    self.config = config or forge.get_config()
    self.cache = forge.get_statistics_cache(config=self.config)
    self.datastore = forge.get_datastore(archive_access=True)
    self.scheduler = BackgroundScheduler(daemon=True)

    if self.config.core.metrics.apm_server.server_url is not None:
        self.log.info(f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}")
        elasticapm.instrument()
        self.apm_client = elasticapm.Client(server_url=self.config.core.metrics.apm_server.server_url,
                                            service_name="metrics_aggregator")
    else:
        self.apm_client = None
def __init__(self, force_ilm=False):
    self.config = forge.get_config()
    if force_ilm:
        self.config.datastore.ilm.enabled = True

    super().__init__('assemblyline.expiry', shutdown_timeout=self.config.core.expiry.sleep_time + 5)
    self.datastore = forge.get_datastore(config=self.config, archive_access=True)
    self.hot_datastore = forge.get_datastore(config=self.config, archive_access=False)
    self.filestore = forge.get_filestore(config=self.config)
    self.cachestore = FileStore(*self.config.filestore.cache)
    self.expirable_collections = []
    self.archiveable_collections = []
    self.counter = MetricsFactory('expiry', Metrics)
    self.counter_archive = MetricsFactory('archive', Metrics)

    if self.config.datastore.ilm.enabled:
        self.fs_hashmap = {
            'file': self.archive_filestore_delete,
            'cached_file': self.archive_cachestore_delete
        }
    else:
        self.fs_hashmap = {
            'file': self.filestore_delete,
            'cached_file': self.cachestore_delete
        }

    for name, definition in self.datastore.ds.get_models().items():
        if hasattr(definition, 'archive_ts'):
            self.archiveable_collections.append(getattr(self.datastore, name))
        if hasattr(definition, 'expiry_ts'):
            self.expirable_collections.append(getattr(self.datastore, name))

    if self.config.core.metrics.apm_server.server_url is not None:
        self.log.info(f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}")
        elasticapm.instrument()
        self.apm_client = elasticapm.Client(server_url=self.config.core.metrics.apm_server.server_url,
                                            service_name="expiry")
    else:
        self.apm_client = None
def __init__(self, pid=None, **kwargs):
    super().__init__(**kwargs)
    self.pid = pid
    self.prefix_cache = {}
    self.command_list = Command.__subclasses__()
    self.log_channel = None
    self.error_channel = None
    if APM_SERVICE:
        self.apm = elasticapm.Client({'SERVICE_NAME': APM_SERVICE})
    else:
        self.apm = None
    print("Process {} created for shards {}".format(self.pid, self.shard_ids))
def apm_preflight(config, node_name, smoothing_strategy):
    # We're being a bit naughty here. Shhhh.
    elasticapm.utils.cgroup.get_cgroup_container_metadata = lambda: {'container': {'id': node_name}}

    processors = []
    if smoothing_strategy == 'floor':
        processors.append('synthbean.processors.span_smoother')

    client = elasticapm.Client(
        hostname=node_name,
        service_node_name=node_name,
        service_name=config['service_name'],
        server_url=config['server_url'],
        processors=processors)
    elasticapm.instrument()
    return client
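# A usage sketch (the configuration values are assumptions): once apm_preflight
# has patched the cgroup lookup and built the client, synthetic transactions
# can be emitted against it directly.
client = apm_preflight({'service_name': 'synthbean', 'server_url': 'http://localhost:8200'},
                       node_name='node-1', smoothing_strategy=None)  # None skips the span smoother
client.begin_transaction('synthetic')
with elasticapm.capture_span('synthetic-sleep'):
    pass
client.end_transaction('synthetic', 'success')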
def __init__(self):
    super().__init__('assemblyline.workflow')

    self.config = forge.get_config()
    self.datastore = forge.get_datastore(self.config)
    self.start_ts = f"{self.datastore.ds.now}/{self.datastore.ds.day}-1{self.datastore.ds.day}"

    if self.config.core.metrics.apm_server.server_url is not None:
        self.log.info(f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}")
        elasticapm.instrument()
        self.apm_client = elasticapm.Client(server_url=self.config.core.metrics.apm_server.server_url,
                                            service_name="workflow")
    else:
        self.apm_client = None
def test_python_version_deprecation(mock_python_version_tuple, version, raises, recwarn):
    warnings.simplefilter("always")
    mock_python_version_tuple.return_value = version

    e = None
    try:
        e = elasticapm.Client()
    finally:
        if e:
            e.close()
    if raises:
        assert len(recwarn) == 1
        w = recwarn.pop(DeprecationWarning)
        assert "agent only supports" in w.message.args[0]
    else:
        assert len(recwarn) == 0
def __init__(self, handler, registry):
    self.handler = handler
    self.registry = registry
    config = registry.settings

    service_version = config.get("elasticapm.service_version")
    if service_version:
        try:
            service_version = pkg_resources.get_distribution(service_version).version
        except pkg_resources.DistributionNotFound:
            pass

    self.client = elasticapm.Client(
        server_url=config.get("elasticapm.server_url"),
        server_timeout=config.get("elasticapm.server_timeout"),
        name=config.get("elasticapm.name"),
        framework_name="Pyramid",
        framework_version=pkg_resources.get_distribution("pyramid").version,
        service_name=config.get("elasticapm.service_name"),
        service_version=service_version,
        secret_token=config.get("elasticapm.secret_token"),
        include_paths=list_from_setting(config, "elasticapm.include_paths"),
        exclude_paths=list_from_setting(config, "elasticapm.exclude_paths"),
        debug=asbool(config.get('elasticapm.debug')),
    )
def __init__(self):
    super().__init__('assemblyline.alerter')

    # Publish counters to the metrics sink.
    self.counter = MetricsFactory('alerter', Metrics)
    self.datastore = forge.get_datastore(self.config)
    self.persistent_redis = get_client(
        host=self.config.core.redis.persistent.host,
        port=self.config.core.redis.persistent.port,
        private=False,
    )
    self.process_alert_message = forge.get_process_alert_message()
    self.running = False

    self.alert_queue = NamedQueue(ALERT_QUEUE_NAME, self.persistent_redis)
    if self.config.core.metrics.apm_server.server_url is not None:
        self.log.info(f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}")
        elasticapm.instrument()
        self.apm_client = elasticapm.Client(server_url=self.config.core.metrics.apm_server.server_url,
                                            service_name="alerter")
    else:
        self.apm_client = None
def pytest_configure(config):
    # FIXME set url correctly
    client = _apm_client(
        e_.Client(service_name="testme", server_url="http://localhost:8200"))
import elasticapm
import json

elasticapm.instrument()
apm = elasticapm.Client()


def response(func, event, context, transaction_name=''):
    """
    Calls `func(event, context)` and returns a properly formatted response,
    also reporting the transaction results to APM using either the given
    `transaction_name` or `func`'s name.
    """
    if not transaction_name:
        transaction_name = func.__name__

    apm.begin_transaction('Request')
    elasticapm.set_custom_context({
        'event': event,
        'function_name': context.function_name,
        'aws_request_id': context.aws_request_id,
    })
    # https://docs.aws.amazon.com/pt_br/lambda/latest/dg/python-context-object.html

    try:
        body = func(event, context)
        response = {'statusCode': 200, 'body': json.dumps(body)}
        return response
    except Exception as e:
        error = {'code': type(e).__name__, 'message': str(e)}
        body = {'errors': [error]}
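# A usage sketch (the handler names and event shape are hypothetical): a Lambda
# entry point can delegate to the `response` wrapper defined above so that
# every invocation is reported as an APM transaction.
def get_user(event, context):
    # Business logic for one route; the wrapper handles status codes and APM.
    return {'id': event.get('pathParameters', {}).get('id')}


def handler(event, context):
    return response(get_user, event, context, transaction_name='get_user')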
## Implemented by API reference - https://www.elastic.co/guide/en/apm/agent/python/current/api.html
import elasticapm
import time

apm_client = elasticapm.Client(
    service_name='labels',
    server_url='http://localhost:8200',
)


def uranium_enrichment(tts=1):
    time.sleep(tts)


def til_atom(request_id, user_id, tts):
    # Starting a transaction - passing the transaction type as an argument
    apm_client.begin_transaction(transaction_type='tasks')

    # Adding a label
    elasticapm.label(request_id=request_id)

    try:
        # Do something very important
        uranium_enrichment(tts=tts)
    except Exception:
        result = 'failure'
import logging
import random

from aiohttp.client import ClientSession
from sanic import Sanic
from sanic import response

import elasticapm
from common import tracked_sleep

logging.basicConfig(level=logging.DEBUG)

app = Sanic(__name__)
apm_client = elasticapm.Client(service_name='sanic-greeter')
elasticapm.instrument()


@app.middleware('request')
async def start_transaction(request):
    apm_client.begin_transaction('request')


@app.middleware('response')
async def end_transaction(request, response):
    apm_client.end_transaction(request.uri_template, response.status)


async def get_ip():
    async with ClientSession() as session:
## Implemented by API reference - https://www.elastic.co/guide/en/apm/agent/python/current/api.html
import elasticapm
import pymongo
import redis
import requests

apm_client = elasticapm.Client(
    service_name='instrument',
    server_url='http://localhost:8200',
)
redis_client = redis.Redis()
mongo_client = pymongo.MongoClient()


def uranium_enrichment():
    requests.get('https://www.google.com/search?q=Where+i+can+find+enrichmed+uranium&oq=Where+i+can+find+enrichmed+uranium&aqs=chrome..69i57j33.32739j0j1&sourceid=chrome&ie=UTF-8')


def push_to_queue():
    redis_client.lpush(
        'til_atom_queue',
        b'til lapanim',
    )


def complete_mission():
    mongo_client.til.missions.insert({
        'id': 'til_atom',
        'status': 'done',
def bench_init():
    client = elasticapm.Client()