def test_it_keeps_the_total_sum_of_the_seen_threads_count_values(self):
    """Adding several samples accumulates their seen_threads_count totals."""
    first = Sample(stacks=[[Frame("frame1")]], seen_threads_count=56)
    second = Sample(stacks=[[Frame("frame1")]], seen_threads_count=78)
    for current in (first, second):
        self.subject.add(current)
    assert self.subject.total_seen_threads_count == 56 + 78
def test_it_keeps_the_total_sum_of_the_attempted_sample_threads_count_values(self):
    """Adding several samples accumulates their attempted_sample_threads_count totals."""
    first = Sample(stacks=[[Frame("frame1")]], attempted_sample_threads_count=12)
    second = Sample(stacks=[[Frame("frame1")]], attempted_sample_threads_count=34)
    for current in (first, second):
        self.subject.add(current)
    assert self.subject.total_attempted_sample_threads_count == 12 + 34
def test_it_includes_correct_file_path_when_available_on_non_Windows_system(self):
    """Frames with file paths get a dotted-path prefix in the node key plus a 'file' entry."""
    stack = [
        Frame("bottom_with_path", file_path="path/file1.py"),
        Frame("middle_with_path", file_path="path/file2.py"),
        Frame("top_without_path"),
    ]
    self.profile.add(Sample(stacks=[stack]))
    expected = {
        "children": {
            "path.file2:middle_with_path": {
                "children": {
                    "top_without_path": {
                        "counts": {
                            "WALL_TIME": 1
                        }
                    }
                },
                "file": "path/file2.py",
            }
        },
        "file": "path/file1.py",
    }
    children = self.decoded_json_result()["callgraph"]["children"]
    assert children["path.file1:bottom_with_path"] == expected
def before(self):
    """Set up the integration-test fixtures: a Profile with one sample whose end
    time is "now" and start time five minutes ago, an environment dict with the
    agent's tuning knobs, and a matching AgentConfiguration.
    """
    # NOTE(review): int(time.time()) truncates to whole seconds before scaling
    # to millis — presumably second granularity is fine for this fixture; confirm.
    now_millis = int(time.time()) * 1000
    five_minutes_ago_millis = now_millis - (5 * 60 * 1000)
    # Single sample whose only frame is named after the profiling group.
    sample = Sample(
        stacks=[[Frame(MY_PROFILING_GROUP_NAME_FOR_INTEG_TESTS)]],
        attempted_sample_threads_count=1,
        seen_threads_count=1)
    self.profile = Profile(MY_PROFILING_GROUP_NAME_FOR_INTEG_TESTS, 1.0, 1.0,
                           five_minutes_ago_millis)
    # FIXME: Remove setting the end time manually once the feature fully supports it.
    self.profile.end = now_millis
    self.profile.add(sample)
    # Environment dict mirrors what the agent would assemble at startup.
    self.environment = {
        "should_profile": True,
        "profiling_group_name": MY_PROFILING_GROUP_NAME_FOR_INTEG_TESTS,
        "aws_session": boto3.session.Session(),
        "reporting_interval": timedelta(minutes=13),
        "sampling_interval": timedelta(seconds=1),
        "minimum_time_reporting": timedelta(minutes=6),
        "max_stack_depth": 2345,
        "cpu_limit_percentage": 29,
        "agent_metadata": AgentMetadata(fleet_info=DefaultFleetInfo())
    }
    # The client builder reads its settings from the same environment dict.
    self.environment["codeguru_profiler_builder"] = CodeGuruClientBuilder(
        self.environment)
    # AgentConfiguration is built from the same values so the two stay in sync.
    self.agent_config = AgentConfiguration(
        should_profile=True,
        sampling_interval=self.environment["sampling_interval"],
        reporting_interval=self.environment["reporting_interval"],
        minimum_time_reporting=self.environment["minimum_time_reporting"],
        max_stack_depth=self.environment["max_stack_depth"],
        cpu_limit_percentage=self.environment["cpu_limit_percentage"])
def example_profile():
    """Build a fully-populated Profile fixture covering a fixed two-hour window.

    The profile holds one sample with three stacks sharing a common
    bottom/middle, plus overhead and (on Windows) a forced cpu time.
    """
    start_millis = 1514764800000
    end_millis = 1514772000000
    profile = Profile(profiling_group_name="TestProfilingGroupName",
                      sampling_interval_seconds=1.0,
                      host_weight=2,
                      start=start_millis,
                      clock=lambda: 1514772000000)
    stacks = [
        [Frame("bottom"), Frame("middle"), Frame("top")],
        [Frame("bottom"), Frame("middle"), Frame("different_top")],
        [Frame("bottom"), Frame("middle")],
    ]
    profile.add(
        Sample(stacks=stacks,
               attempted_sample_threads_count=10,
               seen_threads_count=15))
    profile.end = end_millis
    profile.set_overhead_ms(timedelta(milliseconds=256))
    if platform.system() == "Windows":
        # On Windows time.process stays constant when no cpu time was used
        # (https://bugs.python.org/issue37859), so override cpu_time_seconds
        # manually to keep the test deterministic.
        profile.cpu_time_seconds = 0.123
    return profile
def test_it_includes_line_when_available(self):
    """Line numbers are collected per node; multiple distinct lines become a list."""
    first_stack = [
        Frame("bottom_with_line_no", line_no=123),
        Frame("middle_with_line_no", line_no=234),
        Frame("top_without_line_no"),
    ]
    second_stack = [
        Frame("bottom_with_line_no", line_no=123),
        Frame("middle_with_line_no", line_no=345),
        Frame("top_without_line_no"),
    ]
    self.profile.add(Sample(stacks=[first_stack, second_stack]))
    expected = {
        "children": {
            "middle_with_line_no": {
                "children": {
                    "top_without_line_no": {
                        "counts": {
                            "WALL_TIME": 2
                        }
                    }
                },
                "line": [234, 345],
            }
        },
        "line": [123],
    }
    children = self.decoded_json_result()["callgraph"]["children"]
    assert children["bottom_with_line_no"] == expected
def test_it_handles_unicode_escape_correctly_on_Windows(self):
    """Forward-slash and backslash Windows paths normalize to the same node key."""
    stack = [
        Frame("bottom_with_path"),
        Frame("middle",
              file_path="C:/User/ironman/path/xs.py",
              class_name="ClassA"),
        Frame("top",
              file_path="C:\\User\\ironman\\path\\xs.py",
              class_name="ClassA"),
    ]
    self.profile.add(Sample(stacks=[stack]))
    expected = {
        "children": {
            "User.ironman.path.xs:ClassA:middle": {
                "children": {
                    "User.ironman.path.xs:ClassA:top": {
                        "file": "C:\\User\\ironman\\path\\xs.py",
                        "counts": {
                            "WALL_TIME": 1
                        }
                    }
                },
                "file": "C:\\User\\ironman\\path\\xs.py",
            }
        }
    }
    children = self.decoded_json_result()["callgraph"]["children"]
    assert children["bottom_with_path"] == expected
def test_it_returns_the_average_thread_weight_for_the_samples_in_the_profile(self):
    """Average thread weight is seen threads over attempted threads (15/10 == 1.5)."""
    self.subject.add(
        Sample(stacks=[[Frame("frame1")]],
               attempted_sample_threads_count=10,
               seen_threads_count=15))
    assert self.subject.average_thread_weight() == 1.5
def test_it_handles_unicode_escape_correctly_on_non_Windows_system(self):
    """On non-Windows a backslash path is kept verbatim in the node key (minus extension)."""
    stack = [
        Frame("bottom_with_path"),
        Frame("top",
              file_path="C:\\User\\ironman\\path\\xs.py",
              class_name="ClassA"),
    ]
    self.profile.add(Sample(stacks=[stack]))
    expected = {
        "children": {
            "C:\\User\\ironman\\path\\xs:ClassA:top": {
                "file": "C:\\User\\ironman\\path\\xs.py",
                "counts": {
                    "WALL_TIME": 1
                }
            }
        }
    }
    children = self.decoded_json_result()["callgraph"]["children"]
    assert children["bottom_with_path"] == expected
def sample(self):
    """
    Samples stack traces of running threads (up to max_threads, and excluding
    excluded_threads) running in the current Python instance.

    Any exception encountered during the sampling process is propagated.
    """
    threads = self._get_all_threads()
    seen_count = len(threads)
    candidates = self._threads_to_sample_from(threads)
    attempted_count = len(candidates)
    stacks = self._get_stacks(
        threads_to_sample=candidates,
        excluded_threads=self._excluded_threads,
        max_depth=AgentConfiguration.get().max_stack_depth)
    # Drop thread references promptly to keep memory usage down.
    del threads, candidates
    return Sample(stacks=stacks,
                  attempted_sample_threads_count=attempted_count,
                  seen_threads_count=seen_count)
def test_it_includes_class_name_when_available(self):
    """Frames carrying a class name get a 'Class:frame' node key."""
    stack = [
        Frame("bottom_with_path", class_name="ClassA"),
        Frame("middle_with_path", class_name="ClassB"),
        Frame("top_without_path"),
    ]
    self.profile.add(Sample(stacks=[stack]))
    expected = {
        "children": {
            "ClassB:middle_with_path": {
                "children": {
                    "top_without_path": {
                        "counts": {
                            "WALL_TIME": 1
                        }
                    }
                }
            }
        }
    }
    children = self.decoded_json_result()["callgraph"]["children"]
    assert children["ClassA:bottom_with_path"] == expected
def test_it_handles_unicode_frames_correctly(self):
    """Non-ASCII frame names survive encoding into the callgraph unchanged."""
    stack = [Frame("unicode_bottom"), Frame(u"😉"), Frame(u"🙃")]
    self.profile.add(Sample(stacks=[stack]))
    expected = {
        "children": {
            u"😉": {
                "children": {
                    u"🙃": {
                        "counts": {
                            "WALL_TIME": 1
                        }
                    }
                }
            }
        }
    }
    children = self.decoded_json_result()["callgraph"]["children"]
    assert children["unicode_bottom"] == expected
def before(self):
    """Arrange a profile whose reported memory usage is just over the limit."""
    super().before()
    over_limit = DEFAULT_MEMORY_LIMIT_BYTES + 1
    self.mock_profile.get_memory_usage_bytes = MagicMock(return_value=over_limit)
    self.sample = Sample([["method1", "method2"]])
def before(self):
    """Arrange over-limit memory usage and a clock just shy of the initial minimum reporting interval."""
    super().before()
    over_limit = DEFAULT_MEMORY_LIMIT_BYTES + 1
    self.mock_profile.get_memory_usage_bytes = MagicMock(return_value=over_limit)
    self.sample = Sample([["method1", "method2"]])
    self.move_clock_to(INITIAL_MINIMUM_REPORTING_INTERVAL - ONE_SECOND)
def test_add_stack_set_profile_end(self):
    """Adding a sample stamps the profile end 1000 ms after its start."""
    self.subject.add(
        Sample(stacks=[[Frame("frame1")]],
               attempted_sample_threads_count=12))
    expected_end = self.subject.start + 1000
    assert self.subject.end == expected_end
def test_add_stack(self, stacks, expected):
    """Parametrized: adding the given stacks produces the expected profile dict."""
    self.subject.add(Sample(stacks=stacks))
    actual = _convert_profile_into_dict(self.subject)
    assert actual == expected
def before(self):
    """Arrange just-under-limit memory usage and a clock just shy of the reporting interval."""
    super().before()
    under_limit = DEFAULT_MEMORY_LIMIT_BYTES - 1
    self.mock_profile.get_memory_usage_bytes = MagicMock(return_value=under_limit)
    self.sample = Sample([["method1", "method2"]])
    self.move_clock_to(self.reporting_interval - ONE_SECOND)