def test_flush_fail(self):
    """A flush against a server returning unparsable JSON must keep the queue intact."""
    server = TestServer(5006)
    server.set_response_data("unparsablejson")
    server.start()

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5006',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    for msg in ({'m1': 1}, {'m2': 2}):
        agent.message_queue.add('t1', msg)

    agent.message_queue.flush()

    # The failed flush must leave both messages queued for retry.
    self.assertEqual(len(agent.message_queue.queue), 2)

    agent.destroy()
    server.join()
def test_flush(self):
    """Expired messages are dropped on flush; fresh ones are delivered."""
    server = TestServer(5005)
    server.start()

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5005',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    agent.message_queue.add('t1', {'m1': 1})
    agent.message_queue.add('t1', {'m2': 2})
    # Age the first message past the 20-minute retention window.
    agent.message_queue.queue[0]['added_at'] = timestamp() - 20 * 60

    agent.message_queue.flush()

    payload = json.loads(server.get_request_data())
    # Only the second (fresh) message should have been sent.
    self.assertEqual(payload['payload']['messages'][0]['content']['m2'], 2)

    agent.destroy()
    server.join()
def test_record_profile(self):
    """The CPU reporter should capture frames from main-thread work."""
    if runtime_info.OS_WIN:
        return

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)
    agent.cpu_reporter.start()

    # Record from a background thread while the main thread burns CPU.
    recorder = threading.Thread(target=lambda: agent.cpu_reporter.record(2))
    recorder.start()

    def cpu_work_main_thread():
        for i in range(0, 1000000):
            text = "text1" + str(i)
            text = text + "text2"

    cpu_work_main_thread()
    recorder.join()

    self.assertTrue('cpu_work_main_thread' in str(agent.cpu_reporter.profile))

    agent.destroy()
def test_allocation_profile(self):
    """Allocation profiler output should reference this test file."""
    if runtime_info.OS_WIN or not min_version(3, 4):
        return

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        auto_profiling=False,
        debug=True)

    messages = []

    def add_mock(topic, message):
        messages.append(message)

    # Capture queued messages instead of sending them.
    agent.message_queue.add = add_mock

    agent.start_allocation_profiler()
    # Allocate a batch of dicts so the profiler has sites in this file.
    mem1 = [{'v': random.randint(0, 1000000)} for _ in range(0, 1000)]
    agent.stop_allocation_profiler()

    self.assertTrue('agent_test.py' in str(messages))

    agent.destroy()
def test_run_in_main_thread(self):
    """run_in_main_thread executes the callable on the main thread, not the caller's."""
    if runtime_info.OS_WIN:
        return

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    result = {}

    def _capture():
        result['thread_id'] = threading.current_thread().ident

    def _submit():
        agent.run_in_main_thread(_capture)

    worker = threading.Thread(target=_submit)
    worker.start()
    worker.join()

    self.assertEqual(result['thread_id'], threading.current_thread().ident)

    agent.destroy()
def test_post(self):
    """APIRequest.post sends the full agent envelope plus the payload."""
    server = TestServer(5001)
    server.set_response_data(json.dumps({'c': 3, 'd': 4}))
    server.start()

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        app_environment='test',
        app_version='1.1.1',
        debug=True)

    api_request = APIRequest(agent)
    api_request.post('test', {'a': 1, 'b': 2})

    data = json.loads(server.get_request_data())

    # Envelope fields identifying the run and host.
    self.assertEqual(data['run_id'], agent.run_id)
    self.assertEqual(data['run_ts'], agent.run_ts)
    self.assertEqual(data['process_id'], os.getpid())
    self.assertEqual(data['host_name'], socket.gethostname())
    self.assertEqual(data['runtime_type'], 'python')
    self.assertEqual(
        data['runtime_version'],
        '{0.major}.{0.minor}.{0.micro}'.format(sys.version_info))
    self.assertEqual(data['agent_version'], agent.AGENT_VERSION)

    # Application metadata supplied at start().
    self.assertEqual(data['app_name'], 'TestPythonApp')
    self.assertEqual(data['app_environment'], 'test')
    self.assertEqual(data['app_version'], '1.1.1')

    # Payload is passed through unchanged.
    self.assertEqual(data['payload'], {'a': 1, 'b': 2})

    agent.destroy()
    server.join()
def test_flush(self):
    """Queued messages are delivered in order on flush."""
    server = TestServer(5004)
    server.start()

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5004',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    agent.message_queue.add('t1', {'m1': 1})
    agent.message_queue.add('t1', {'m2': 2})

    agent.message_queue.flush()

    payload = json.loads(server.get_request_data())
    messages = payload['payload']['messages']
    self.assertEqual(messages[0]['content']['m1'], 1)
    self.assertEqual(messages[1]['content']['m2'], 2)

    agent.destroy()
    server.join()
def test_cpu_profile(self):
    """Manually started CPU profiler reports frames from this test."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        auto_profiling=False,
        debug=True)

    messages = []

    def add_mock(topic, message):
        messages.append(message)

    # Capture queued messages instead of sending them.
    agent.message_queue.add = add_mock

    agent.start_cpu_profiler()
    for _ in range(0, 2000000):
        random.randint(1, 1000000)
    agent.stop_cpu_profiler()

    self.assertTrue('test_cpu_profile' in str(messages))

    agent.destroy()
def test_profile(self):
    """An agent.profile() span should appear in the reported CPU profile."""
    if runtime_info.OS_WIN:
        return

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        auto_profiling=False,
        debug=True)
    agent.cpu_reporter.start()

    span = agent.profile()
    for _ in range(0, 2000000):
        random.randint(1, 1000000)
    span.stop()

    agent.cpu_reporter.report()

    self.assertTrue('test_profile' in str(agent.message_queue.queue))

    agent.destroy()
def test_block_profile(self):
    """The block profiler should attribute wait time to the blocking call."""
    if runtime_info.OS_WIN or not min_version(3, 4):
        return

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        auto_profiling=False,
        debug=True)

    messages = []

    def add_mock(topic, message):
        messages.append(message)

    # Capture queued messages instead of sending them.
    agent.message_queue.add = add_mock

    agent.start_block_profiler()

    def blocking_call():
        time.sleep(0.1)

    for _ in range(5):
        blocking_call()

    agent.stop_block_profiler()

    self.assertTrue('blocking_call' in str(messages))

    agent.destroy()
def test_tf_profile(self):
    """TensorFlow profiler messages should reference this test."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        auto_profiling=False,
        debug=True)

    # Skip when the TensorFlow profiler is unavailable in this environment.
    if not agent.tf_reporter.profiler.ready:
        return

    messages = []

    def add_mock(topic, message):
        messages.append(message)

    agent.message_queue.add = add_mock

    agent.start_tf_profiler()

    import tensorflow as tf
    x = tf.random_normal([1000, 1000])
    y = tf.random_normal([1000, 1000])
    res = tf.matmul(x, y)
    with tf.Session() as sess:
        sess.run(res)

    agent.stop_tf_profiler()

    self.assertTrue('test_tf_profile' in str(messages))

    agent.destroy()
def test_expire(self):
    """expire() drops messages older than the retention window."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5003',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    agent.message_queue.add('t1', {'m1': 1})
    agent.message_queue.add('t1', {'m2': 2})
    # Age the first message by 20 minutes so it is considered expired.
    agent.message_queue.queue[0]['added_at'] = timestamp() - 20 * 60

    agent.message_queue.expire()

    # Only the fresh message should remain.
    self.assertEqual(len(agent.message_queue.queue), 1)
    self.assertEqual(agent.message_queue.queue[0]['content']['m2'], 2)

    agent.destroy()
def test_start_profiler(self):
    """ProfilerScheduler invokes record/report callbacks at the configured rates.

    With record interval 0.010s, record duration 0.002s and report interval
    0.050s, a 0.150s run should produce at least 10 records and 2 reports.
    """
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    stats = {
        "records": 0,
        "reports": 0,
    }

    def record_func(duration):
        stats["records"] += 1

    def report_func():
        stats["reports"] += 1

    ps = ProfilerScheduler(agent, 0.010, 0.002, 0.050, record_func, report_func)
    ps.start()

    time.sleep(0.150)

    # assertGreaterEqual reports the actual counts on failure, unlike the
    # original assertFalse(x < n) form, which only says "True is not false".
    self.assertGreaterEqual(stats["records"], 10)
    self.assertGreaterEqual(stats["reports"], 2)

    ps.stop()
    agent.destroy()
def test_tf_profile(self):
    """TensorFlow profiler messages should reference this test."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        auto_profiling=False,
        debug=True)

    # Nothing to test when the TF profiler could not initialize.
    if not agent.tf_reporter.profiler.ready:
        return

    messages = []

    def add_mock(topic, message):
        messages.append(message)

    agent.message_queue.add = add_mock

    agent.start_tf_profiler()

    import tensorflow as tf
    x = tf.random_normal([1000, 1000])
    y = tf.random_normal([1000, 1000])
    res = tf.matmul(x, y)
    with tf.Session() as sess:
        sess.run(res)

    agent.stop_tf_profiler()

    self.assertTrue('test_tf_profile' in str(messages))

    agent.destroy()
def test_flush_fail(self):
    """A flush that fails to parse the server response keeps the queue intact."""
    server = TestServer(5005)
    server.set_response_data("unparsablejson")
    server.start()

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5005',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    for msg in ({'m1': 1}, {'m2': 2}):
        agent.message_queue.add('t1', msg)

    agent.message_queue.flush()

    # Both messages must remain queued for a later retry.
    self.assertEqual(len(agent.message_queue.queue), 2)

    agent.destroy()
    server.join()
def test_record_allocation_profile(self):
    """Allocation profiler captures allocation sites from this file."""
    if runtime_info.OS_WIN or not min_version(3, 4):
        return

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        auto_profiling=False,
        debug=True)
    agent.allocation_reporter.profiler.reset()

    mem1 = []

    def mem_leak(n=100000):
        mem2 = []
        for i in range(0, n):
            mem1.append(random.randint(0, 1000))
            mem2.append(random.randint(0, 1000))

    # Deep call chain so the recorded profile has a non-trivial stack.
    def mem_leak2():
        mem_leak()

    def mem_leak3():
        mem_leak2()

    def mem_leak4():
        mem_leak3()

    def mem_leak5():
        mem_leak4()

    def record():
        agent.allocation_reporter.profiler.start_profiler()
        time.sleep(2)
        agent.allocation_reporter.profiler.stop_profiler()

    record_t = threading.Thread(target=record)
    record_t.start()

    # simulate leak
    mem_leak5()

    record_t.join()

    profile = agent.allocation_reporter.profiler.build_profile(2)[0]['profile'].to_dict()
    self.assertTrue('allocation_profiler_test.py' in str(profile))

    agent.destroy()
def test_set_get_props(self):
    """Config flags round-trip through setter and getter."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    self.assertFalse(agent.config.is_profiling_disabled())
    agent.config.set_profiling_disabled(True)
    self.assertTrue(agent.config.is_profiling_disabled())

    agent.destroy()
def test_set_get_props(self):
    """The profiling-disabled flag defaults to off and is settable."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    # Default state: profiling enabled.
    self.assertFalse(agent.config.is_profiling_disabled())

    agent.config.set_profiling_disabled(True)
    self.assertTrue(agent.config.is_profiling_disabled())

    agent.destroy()
def test_record_allocation_profile(self):
    """Allocation reporter records allocation sites from this file."""
    if not min_version(3, 4):
        return

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    mem1 = []

    def mem_leak(n=100000):
        mem2 = []
        for i in range(0, n):
            mem1.append(random.randint(0, 1000))
            mem2.append(random.randint(0, 1000))

    # Deep call chain so the recorded profile has a non-trivial stack.
    def mem_leak2():
        mem_leak()

    def mem_leak3():
        mem_leak2()

    def mem_leak4():
        mem_leak3()

    def mem_leak5():
        mem_leak4()

    def record():
        agent.allocation_reporter.record(2)

    record_t = threading.Thread(target=record)
    record_t.start()

    # simulate leak
    mem_leak5()

    record_t.join()

    self.assertTrue(
        'allocation_reporter_test.py' in str(agent.allocation_reporter.profile))

    agent.destroy()
def test_skip_stack(self):
    """Frame cache classifies agent-internal and system frames correctly."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    # A file from the agent package is an agent frame.
    test_agent_file = os.path.realpath(stackimpact.__file__)
    self.assertTrue(agent.frame_cache.is_agent_frame(test_agent_file))

    # A stdlib file is a system frame.
    test_system_file = os.path.realpath(threading.__file__)
    self.assertTrue(agent.frame_cache.is_system_frame(test_system_file))

    agent.destroy()
def test_load(self):
    """Remote config with profiling_disabled=yes disables profiling."""
    server = TestServer(5008)
    server.set_response_data('{"profiling_disabled":"yes"}')
    server.start()

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5008',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    agent.config_loader.load()
    self.assertTrue(agent.config.is_profiling_disabled())

    agent.destroy()
    server.join()
def test_report(self):
    """Process reporter produces valid metrics for the current platform."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    agent.process_reporter.start()
    agent.process_reporter.report()
    time.sleep(0.1)
    agent.process_reporter.report()

    metrics = agent.process_reporter.metrics
    inf = float("inf")

    if not runtime_info.OS_WIN:
        self.is_valid(metrics, Metric.TYPE_COUNTER, Metric.CATEGORY_CPU,
                      Metric.NAME_CPU_TIME, 0, inf)

    self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_CPU,
                  Metric.NAME_CPU_USAGE, 0, inf)

    if not runtime_info.OS_WIN:
        self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_MEMORY,
                      Metric.NAME_MAX_RSS, 0, inf)

    # /proc-based memory metrics are Linux-only.
    if runtime_info.OS_LINUX:
        self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_MEMORY,
                      Metric.NAME_CURRENT_RSS, 0, inf)
        self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_MEMORY,
                      Metric.NAME_VM_SIZE, 0, inf)

    self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_GC,
                  Metric.NAME_GC_COUNT, 0, inf)

    # Detailed GC stats require gc.get_stats() (Python 3.4+).
    if min_version(3, 4):
        self.is_valid(metrics, Metric.TYPE_COUNTER, Metric.CATEGORY_GC,
                      Metric.NAME_GC_COLLECTIONS, 0, inf)
        self.is_valid(metrics, Metric.TYPE_COUNTER, Metric.CATEGORY_GC,
                      Metric.NAME_GC_COLLECTED, 0, inf)
        self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_GC,
                      Metric.NAME_GC_UNCOLLECTABLE, 0, inf)

    self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_RUNTIME,
                  Metric.NAME_THREAD_COUNT, 0, inf)

    agent.destroy()
def test_record_tf_profile(self):
    """TF profiler build_profile output references this test in both profiles."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        auto_profiling=False,
        debug=True)
    agent.tf_reporter.profiler.reset()

    # Nothing to test when the TF profiler could not initialize.
    if not agent.tf_reporter.profiler.ready:
        return

    def record():
        agent.tf_reporter.profiler.start_profiler()
        time.sleep(1)
        agent.tf_reporter.profiler.stop_profiler()

    record_t = threading.Thread(target=record)
    record_t.start()

    import tensorflow as tf
    x = tf.random_normal([1000, 1000])
    y = tf.random_normal([1000, 1000])
    res = tf.matmul(x, y)
    with tf.Session() as sess:
        sess.run(res)

    record_t.join()

    # Both generated profiles should reference this test function.
    profile = agent.tf_reporter.profiler.build_profile(1)[0]['profile'].to_dict()
    self.assertTrue('test_record_tf_profile' in str(profile))

    profile = agent.tf_reporter.profiler.build_profile(1)[1]['profile'].to_dict()
    self.assertTrue('test_record_tf_profile' in str(profile))

    agent.destroy()
def test_load(self):
    """Loading remote config applies the profiling_disabled flag."""
    server = TestServer(5008)
    server.set_response_data('{"profiling_disabled":"yes"}')
    server.start()

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5008',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    agent.config_loader.load()

    self.assertTrue(agent.config.is_profiling_disabled())

    agent.destroy()
    server.join()
def test_counter_metric(self):
    """Counter metrics report the delta between successive measurements."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    m = Metric(agent, Metric.TYPE_COUNTER, Metric.CATEGORY_CPU,
               Metric.NAME_CPU_USAGE, Metric.UNIT_NONE)

    # First sample only establishes the baseline — no measurement yet.
    m.create_measurement(Metric.TRIGGER_TIMER, 100)
    self.assertFalse(m.has_measurement())

    m.create_measurement(Metric.TRIGGER_TIMER, 110)
    self.assertEqual(m.measurement.value, 10)

    m.create_measurement(Metric.TRIGGER_TIMER, 115)
    self.assertEqual(m.measurement.value, 5)

    agent.destroy()
def test_counter_metric(self):
    """A counter metric's value is the difference from the previous sample."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    m = Metric(agent, Metric.TYPE_COUNTER, Metric.CATEGORY_CPU,
               Metric.NAME_CPU_USAGE, Metric.UNIT_NONE)

    # The initial sample is a baseline only.
    m.create_measurement(Metric.TRIGGER_TIMER, 100)
    self.assertFalse(m.has_measurement())

    # 110 - 100 = 10, then 115 - 110 = 5.
    m.create_measurement(Metric.TRIGGER_TIMER, 110)
    self.assertEqual(m.measurement.value, 10)
    m.create_measurement(Metric.TRIGGER_TIMER, 115)
    self.assertEqual(m.measurement.value, 5)

    agent.destroy()
def test_with_profile(self):
    """agent.profile() works as a context manager and reports this test."""
    if runtime_info.OS_WIN:
        return

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)
    agent.cpu_reporter.start()

    with agent.profile():
        for _ in range(0, 2000000):
            random.randint(1, 1000000)

    agent.cpu_reporter.report()

    self.assertTrue('test_with_profile' in str(agent.message_queue.queue))

    agent.destroy()
def test_add_exception(self):
    """Handled exceptions show up in the error reporter's profile.

    Fixes: leftover debug ``print`` (commented out, matching the sibling
    variant of this test) and a bare ``except:`` narrowed to the exception
    actually raised, so KeyboardInterrupt/SystemExit are not swallowed.
    """
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    try:
        raise ValueError('test_exc_1')
    except ValueError:
        # print_exc is what the error reporter hooks to observe the exception.
        traceback.print_exc()

    # Give the error reporter time to record the exception.
    time.sleep(1.1)

    profile_handled_exc = agent.error_reporter.profile
    #print(profile_handled_exc)
    self.assertTrue('ValueError: test_exc_1' in str(profile_handled_exc))
    self.assertTrue('test_add_exception' in str(profile_handled_exc))

    agent.destroy()
def test_record_profile(self):
    """CPU profiler captures main-thread work between start and stop."""
    if runtime_info.OS_WIN:
        return

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        auto_profiling=False,
        debug=True)
    agent.cpu_reporter.profiler.reset()

    def record():
        agent.cpu_reporter.profiler.start_profiler()
        time.sleep(2)
        agent.cpu_reporter.profiler.stop_profiler()

    record_t = threading.Thread(target=record)
    record_t.start()

    def cpu_work_main_thread():
        for i in range(0, 1000000):
            text = "text1" + str(i)
            text = text + "text2"

    cpu_work_main_thread()
    record_t.join()

    profile = agent.cpu_reporter.profiler.build_profile(2)[0]['profile'].to_dict()
    self.assertTrue('cpu_work_main_thread' in str(profile))

    agent.destroy()
def test_record_span(self):
    """Span counters accumulate recorded span durations."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)
    agent.span_reporter.start()

    # Ten spans of 10 each.
    for _ in range(10):
        agent.span_reporter.record_span("span1", 10)

    span_counters = agent.span_reporter.span_counters
    agent.span_reporter.report()

    counter = span_counters['span1']
    self.assertEqual(counter.name, 'span1')
    self.assertEqual(counter.measurement, 10000)

    agent.destroy()
def test_add_exception(self):
    """A started error reporter records handled exceptions in its profile."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)
    agent.error_reporter.start()

    try:
        raise ValueError('test_exc_1')
    except:
        traceback.print_exc()

    # Give the reporter time to pick up the exception.
    time.sleep(1.1)

    profile_handled_exc = agent.error_reporter.profile
    #print(profile_handled_exc)
    self.assertTrue('ValueError: test_exc_1' in str(profile_handled_exc))
    self.assertTrue('test_add_exception' in str(profile_handled_exc))

    agent.destroy()
def test_report(self):
    """Two reports a short interval apart yield valid platform metrics."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    agent.process_reporter.start()
    agent.process_reporter.report()
    time.sleep(0.1)
    agent.process_reporter.report()

    metrics = agent.process_reporter.metrics
    upper = float("inf")

    if not runtime_info.OS_WIN:
        self.is_valid(metrics, Metric.TYPE_COUNTER, Metric.CATEGORY_CPU,
                      Metric.NAME_CPU_TIME, 0, upper)

    self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_CPU,
                  Metric.NAME_CPU_USAGE, 0, upper)

    if not runtime_info.OS_WIN:
        self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_MEMORY,
                      Metric.NAME_MAX_RSS, 0, upper)

    # /proc-based memory metrics are Linux-only.
    if runtime_info.OS_LINUX:
        self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_MEMORY,
                      Metric.NAME_CURRENT_RSS, 0, upper)
        self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_MEMORY,
                      Metric.NAME_VM_SIZE, 0, upper)

    self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_GC,
                  Metric.NAME_GC_COUNT, 0, upper)

    # Detailed GC stats require gc.get_stats() (Python 3.4+).
    if min_version(3, 4):
        self.is_valid(metrics, Metric.TYPE_COUNTER, Metric.CATEGORY_GC,
                      Metric.NAME_GC_COLLECTIONS, 0, upper)
        self.is_valid(metrics, Metric.TYPE_COUNTER, Metric.CATEGORY_GC,
                      Metric.NAME_GC_COLLECTED, 0, upper)
        self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_GC,
                      Metric.NAME_GC_UNCOLLECTABLE, 0, upper)

    self.is_valid(metrics, Metric.TYPE_STATE, Metric.CATEGORY_RUNTIME,
                  Metric.NAME_THREAD_COUNT, 0, upper)

    agent.destroy()
from __future__ import print_function import random import time import sys import threading sys.path.append(".") import stackimpact agent = stackimpact.start(agent_key='agent key here', app_name='MyPythonApp') def simulate_cpu_work(): for j in range(0, 100000): random.randint(1, 1000000) def handle_some_event(): span = agent.profile('some event') simulate_cpu_work() span.stop() response = {"statusCode": 200, "body": 'Done'} return response # Simulate events while True:
""" WSGI config for muckrock project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/wsgi/ """ # pylint: disable=wrong-import-position # pylint: disable=ungrouped-imports from django.conf import settings import stackimpact agent = stackimpact.start( agent_key=settings.STACKIMPACT_AGENT_KEY, app_name=settings.MUCKROCK_URL, ) from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Fix django closing connection to MemCachier after every request (#11331) from django.core.cache.backends.memcached import BaseMemcachedCache BaseMemcachedCache.close = lambda self, **kwargs: None
from __future__ import print_function import stackimpact import random import threading import time import signal agent = stackimpact.start(agent_key='agent key here', app_name='LambdaDemoPython', app_environment='prod', block_profiler_disabled=True) def simulate_cpu_work(): for j in range(0, 100000): random.randint(1, 1000000) mem = [] def simulate_mem_leak(): for i in range(0, 1000): obj = {'v': random.randint(0, 1000000)} mem.append(obj) def handler(event, context): span = agent.profile() simulate_cpu_work()
from __future__ import print_function import random import time import sys import threading sys.path.append(".") import stackimpact agent = stackimpact.start( agent_key = 'agent key here', app_name = 'MyPythonApp', auto_profiling = False) agent.start_cpu_profiler() for j in range(0, 1000000): random.randint(1, 1000000) agent.stop_cpu_profiler() ''' agent.start_allocation_profiler() mem1 = [] for i in range(0, 1000): obj1 = {'v': random.randint(0, 1000000)} mem1.append(obj1) agent.stop_allocation_profiler()
from __future__ import print_function

import random
import time
import sys
import threading

sys.path.append(".")
import stackimpact

agent = stackimpact.start(
    agent_key='agent key here',
    app_name='MyPythonApp')


def simulate_cpu_work():
    """Burn some CPU with random number generation."""
    for _ in range(0, 100000):
        random.randint(1, 1000000)


def handle_some_event():
    """Simulate an event handler wrapped in a profiling span."""
    span = agent.profile('some event')
    simulate_cpu_work()
    span.stop()

    response = {"statusCode": 200, "body": 'Done'}
    return response
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
import numpy as np
import sys

sys.path.append(".")
import stackimpact

agent = stackimpact.start(
    agent_key='agent key here',
    app_name='MyKerasScript',
    auto_profiling=False)

agent.start_tf_profiler()

# XOR truth table as training data.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# Small 2-8-1 network with tanh/sigmoid activations.
model = Sequential()
model.add(Dense(8, input_dim=2))
model.add(Activation('tanh'))
model.add(Dense(1))
model.add(Activation('sigmoid'))

sgd = SGD(lr=0.1)
model.compile(loss='binary_crossentropy', optimizer=sgd)

model.fit(X, y, batch_size=1, nb_epoch=1000)
from __future__ import print_function

import random
import time
import sys
import threading

import tensorflow as tf

sys.path.append(".")
import stackimpact

agent = stackimpact.start(
    agent_key='agent key here',
    app_name='MyTensorFlowScript')


def handle_some_event():
    """Run a profiled 1000x1000 matrix multiplication."""
    with agent.profile():
        tf.reset_default_graph()
        x = tf.random_normal([1000, 1000])
        y = tf.random_normal([1000, 1000])
        res = tf.matmul(x, y)
        with tf.Session() as sess:
            sess.run(res)


# Simulate events
while True:
    handle_some_event()
    time.sleep(2)
def test_record_block_profile(self):
    """Block profiler captures lock waits, event waits, and network waits."""
    if runtime_info.OS_WIN:
        return

    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        auto_profiling=False,
        debug=True)
    agent.block_reporter.profiler.reset()

    lock = threading.Lock()
    event = threading.Event()

    def lock_lock():
        lock.acquire()
        time.sleep(0.5)
        lock.release()

    def lock_wait():
        lock.acquire()
        lock.release()

    def event_lock():
        time.sleep(0.5)
        event.set()

    def event_wait():
        event.wait()

    def handler():
        time.sleep(0.4)

    def url_wait():
        server = TestServer(5010, 0.4, handler)
        server.start()
        urlopen('http://localhost:5010')
        server.join()

    def record():
        agent.block_reporter.profiler.start_profiler()
        time.sleep(2)
        agent.block_reporter.profiler.stop_profiler()

    record_t = threading.Thread(target=record)
    record_t.start()

    # simulate lock
    threading.Thread(target=lock_lock).start()
    threading.Thread(target=lock_wait).start()

    # simulate event
    threading.Thread(target=event_lock).start()
    threading.Thread(target=event_wait).start()

    # simulate network
    threading.Thread(target=url_wait).start()

    # make sure signals are delivered in python 2, when main thread is waiting
    if runtime_info.PYTHON_2:
        while record_t.is_alive():
            pass

    record_t.join()

    profile = agent.block_reporter.profiler.build_profile(2)[0]['profile'].to_dict()
    self.assertTrue('lock_wait' in str(profile))
    self.assertTrue('event_wait' in str(profile))
    self.assertTrue('url_wait' in str(profile))

    agent.destroy()
# Fix: the script calls sys.path.append() and reads os.environ but never
# imported sys or os, which raises NameError at import time.
import os
import sys

from flask import Flask

try:
    # python 2
    from urllib2 import urlopen
except ImportError:
    # python 3
    from urllib.request import urlopen

sys.path.append(".")
import stackimpact

# StackImpact agent initialization
agent = stackimpact.start(
    agent_key=os.environ['AGENT_KEY'],
    dashboard_address=os.environ['DASHBOARD_ADDRESS'],
    app_name='ExamplePythonFlaskApp',
    app_version='1.0.0',
    debug=True)


# Simulate CPU intensive work
def simulate_cpu():
    duration = 10 * 60 * 60
    usage = 10

    while True:
        for j in range(0, duration):
            for i in range(0, usage * 15000):
                text = "text1" + str(i)
                text = text + "text2"
from __future__ import print_function

import stackimpact
import random
import threading
import time
import signal

agent = stackimpact.start(
    agent_key='agent key here',
    app_name='LambdaDemoPython',
    app_environment='prod',
    block_profiler_disabled=True)


def simulate_cpu_work():
    """Burn some CPU time with random number generation."""
    for j in range(0, 100000):
        random.randint(1, 1000000)


mem = []


def simulate_mem_leak():
    """Grow the module-level list to simulate a memory leak."""
    for i in range(0, 1000):
        obj = {'v': random.randint(0, 1000000)}
        mem.append(obj)


def handler(event, context):
    """Lambda entry point: profile simulated CPU and memory work."""
    span = agent.profile()
    simulate_cpu_work()
    simulate_mem_leak()
    span.stop()
def test_record_block_profile(self):
    """Block reporter records blocking frames and HTTP handler frames."""
    stackimpact._agent = None
    agent = stackimpact.start(
        dashboard_address='http://localhost:5001',
        agent_key='key1',
        app_name='TestPythonApp',
        debug=True)

    lock = threading.Lock()
    event = threading.Event()

    def lock_lock():
        lock.acquire()
        time.sleep(0.5)
        lock.release()

    def lock_wait():
        lock.acquire()
        lock.release()

    def event_lock():
        time.sleep(0.5)
        event.set()

    def event_wait():
        event.wait()

    def handler():
        time.sleep(0.4)

    def url_wait():
        server = TestServer(5010, 0.4, handler)
        server.start()
        urlopen('http://localhost:5010')
        server.join()

    def record():
        # Classify the test server's frames as HTTP frames.
        agent.frame_selector.add_http_frame_regexp(
            os.path.join('tests', 'test_server.py'))
        agent.block_reporter.record(2)

    record_t = threading.Thread(target=record)
    record_t.start()

    # simulate lock
    threading.Thread(target=lock_lock).start()
    threading.Thread(target=lock_wait).start()

    # simulate event
    threading.Thread(target=event_lock).start()
    threading.Thread(target=event_wait).start()

    # simulate network
    threading.Thread(target=url_wait).start()

    # make sure signals are delivered in python 2, when main thread is waiting
    if runtime_info.PYTHON_2:
        while record_t.is_alive():
            pass

    record_t.join()

    self.assertTrue('lock_wait' in str(agent.block_reporter.block_profile))
    self.assertTrue('event_wait' in str(agent.block_reporter.block_profile))
    self.assertTrue('url_wait' in str(agent.block_reporter.block_profile))
    self.assertTrue('handler' in str(agent.block_reporter.http_profile))

    agent.destroy()
# Fix: the script calls sys.path.append() and reads os.environ but never
# imported sys or os, which raises NameError at import time.
import os
import sys

try:
    # python 2
    from urllib2 import urlopen
except ImportError:
    # python 3
    from urllib.request import urlopen

sys.path.append(".")
import stackimpact

# StackImpact agent initialization
agent = stackimpact.start(
    agent_key=os.environ['AGENT_KEY'],
    app_name='ExamplePythonFlaskApp',
    app_version='1.0.0',
    debug=True)


# Simulate CPU intensive work
def simulate_cpu():
    duration = 10 * 60 * 60
    usage = 10

    while True:
        for j in range(0, duration):
            for i in range(0, usage * 15000):
                text = "text1" + str(i)
                text = text + "text2"