def job(**kwargs):
    """Run specified batch of perun jobs to generate profiles.

    This command corresponds to running one isolated batch of profiling jobs,
    outside of regular profilings. Run ``perun run matrix``, after specifying
    job matrix in local configuration to automate regular profilings of your
    project. After the batch is generated, each profile is tagged with
    :preg:`origin` set to current ``HEAD``. This serves as a check to not
    assign such profiles to different minor versions.

    By default the profiles computed by this batch job are stored inside the
    ``.perun/jobs/`` directory as files in form of::

        bin-collector-workload-timestamp.perf

    In order to store generated profiles run the following, with ``i@p``
    corresponding to `pending tag`, which can be obtained by running
    ``perun status``::

        perun add i@p

    .. code-block:: bash

        perun run job -c time -b ./mybin -w file.in -w file2.in -p normalizer

    This command profiles two commands ``./mybin file.in`` and
    ``./mybin file2.in`` and collects the profiling data using the
    :ref:`collectors-time`. The profiles are afterwards normalized with the
    :ref:`postprocessors-normalizer`.

    .. code-block:: bash

        perun run job -c complexity -b ./mybin -w sll.cpp -cp complexity targetdir=./src

    This commands runs one job './mybin sll.cpp' using the
    :ref:`collectors-complexity`, which uses custom binaries targeted at
    ``./src`` directory.

    .. code-block:: bash

        perun run job -c mcollect -b ./mybin -b ./otherbin -w input.txt -p normalizer -p regression_analysis

    This commands runs two jobs ``./mybin input.txt`` and
    ``./otherbin input.txt`` and collects the profiles using the
    :ref:`collectors-memory`. The profiles are afterwards postprocessed,
    first using the :ref:`postprocessors-normalizer` and then with
    :ref:`postprocessors-regression-analysis`.

    Refer to :doc:`jobs` and :doc:`profile` for more details about automation
    and lifetimes of profiles. For list of available collectors and
    postprocessors refer to :ref:`collectors-list` and
    :ref:`postprocessors-list` respectively.
    """
    # Thin CLI wrapper: all argument handling happens in the runner module.
    runner.run_single_job(**kwargs)
def test_collect_complexity(helpers, pcs_full, complexity_collect_job):
    """Test collecting the profile using complexity collector"""
    before_object_count = helpers.count_contents_on_path(pcs_full.path)[0]

    cmd, args, work, collectors, posts, config = complexity_collect_job
    runner.run_single_job(cmd, args, work, collectors, posts, **config)

    # Assert that nothing was removed and exactly one object was added
    after_object_count = helpers.count_contents_on_path(pcs_full.path)[0]
    assert before_object_count + 1 == after_object_count

    # Assert the length first so an empty jobs directory fails with a clean
    # assertion error instead of an IndexError on profiles[0]
    profiles = os.listdir(os.path.join(pcs_full.path, 'jobs'))
    assert len(profiles) == 1
    new_profile = profiles[0]
    assert new_profile.endswith(".perf")
def test_collect_time(helpers, pcs_full, capsys):
    """Test collecting the profile using the time collector"""
    # Count the state before running the single job
    before_object_count = helpers.count_contents_on_path(pcs_full.path)[0]

    runner.run_single_job(["echo"], "", ["hello"], ["time"], [])

    # Assert outputs
    out, err = capsys.readouterr()
    assert err == ''
    assert 'Successfully collected data from echo' in out

    # Assert that just one profile was created
    after_object_count = helpers.count_contents_on_path(pcs_full.path)[0]
    assert before_object_count + 1 == after_object_count

    # Assert the length first so an empty jobs directory fails with a clean
    # assertion error instead of an IndexError on profiles[0]
    profiles = os.listdir(os.path.join(pcs_full.path, 'jobs'))
    assert len(profiles) == 1
    new_profile = profiles[0]
    assert new_profile.startswith("echo-time-hello")
    assert new_profile.endswith(".perf")
def test_collect_trace(monkeypatch, helpers, pcs_full, trace_collect_job):
    """Test collecting the profile using trace collector"""
    head = vcs.get_minor_version_info(vcs.get_minor_head())
    # Replace the real systemtap collection with a deterministic mock
    monkeypatch.setattr(stap, 'systemtap_collect', _mocked_stap)
    before_object_count = helpers.count_contents_on_path(
        pcs_full.get_path())[0]

    cmd, args, work, collectors, posts, config = trace_collect_job
    # Point the collector at the test binary shipped with the test suite
    config['collector_params']['trace']['binary'] = os.path.join(
        os.path.dirname(__file__), 'collect_trace', 'tst')
    runner.run_single_job(cmd, args, work, collectors, posts, [head], **config)

    # Assert that nothing was removed and exactly one object was added
    after_object_count = helpers.count_contents_on_path(pcs_full.get_path())[0]
    assert before_object_count + 1 == after_object_count

    # Assert the length first so an empty jobs directory fails with a clean
    # assertion error instead of an IndexError on profiles[0]
    profiles = os.listdir(os.path.join(pcs_full.get_path(), 'jobs'))
    assert len(profiles) == 1
    new_profile = profiles[0]
    assert new_profile.endswith(".perf")
def test_collect_memory(capsys, helpers, pcs_full, memory_collect_job, memory_collect_no_debug_job):
    """Test collecting the profile using the memory collector"""
    # Fixme: Add check that the profile was correctly generated
    before_object_count = helpers.count_contents_on_path(
        pcs_full.get_path())[0]
    head = vcs.get_minor_version_info(vcs.get_minor_head())
    # Append the minor version list the runner expects as its last argument
    memory_collect_job += ([head], )

    runner.run_single_job(*memory_collect_job)

    # Assert that nothing was removed and exactly one object was added
    after_object_count = helpers.count_contents_on_path(pcs_full.get_path())[0]
    assert before_object_count + 1 == after_object_count

    # Assert the length first so an empty jobs directory fails with a clean
    # assertion error instead of an IndexError on profiles[0]
    profiles = os.listdir(os.path.join(pcs_full.get_path(), 'jobs'))
    assert len(profiles) == 1
    new_profile = profiles[0]
    assert new_profile.endswith(".perf")

    # Run the same job again with sampling and an excluded function; this
    # should produce a second, smaller profile
    cmd, args, _, colls, posts, _ = memory_collect_job
    runner.run_single_job(cmd, args, ["hello"], colls, posts, [head], **{
        'no_func': 'fun', 'sampling': 0.1
    })
    profiles = os.listdir(os.path.join(pcs_full.get_path(), 'jobs'))
    assert len(profiles) == 2
    new_smaller_profile = [p for p in profiles if p != new_profile][0]
    assert new_smaller_profile.endswith(".perf")

    # Assert that nothing was removed
    after_second_object_count = helpers.count_contents_on_path(
        pcs_full.get_path())[0]
    assert after_object_count + 1 == after_second_object_count

    # Fixme: Add check that the profile was correctly generated
    # Collecting from a binary without debug info must fail gracefully:
    # no new objects, and a 'debug info' message on stderr
    memory_collect_no_debug_job += ([head], )
    runner.run_single_job(*memory_collect_no_debug_job)
    last_object_count = helpers.count_contents_on_path(pcs_full.get_path())[0]
    _, err = capsys.readouterr()
    assert after_second_object_count == last_object_count
    assert 'debug info' in err

    # Run the collector directly and check the resource filtering options
    target_bin = memory_collect_job[0][0]
    collector_unit = Unit('memory', {'all': False, 'no_func': 'main'})
    job = Job('memory', [], str(target_bin), '', '')
    _, prof = runner.run_collector(collector_unit, job)
    assert len(list(query.all_resources_of(prof))) == 2

    collector_unit = Unit('memory', {
        'all': False, 'no_source': 'memory_collect_test.c'
    })
    job = Job('memory', [], str(target_bin), '', '')
    _, prof = runner.run_collector(collector_unit, job)
    assert len(list(query.all_resources_of(prof))) == 0
def test_collect_trace_fail(monkeypatch, helpers, pcs_full, trace_collect_job):
    """Test failed collecting using trace collector"""
    global _mocked_stap_code
    global _mocked_stap_file

    head = vcs.get_minor_version_info(vcs.get_minor_head())
    # Replace the real systemtap collection with the configurable mock
    monkeypatch.setattr(stap, 'systemtap_collect', _mocked_stap)
    before_object_count = helpers.count_contents_on_path(
        pcs_full.get_path())[0]
    cmd, args, work, collectors, posts, config = trace_collect_job

    def run_and_check_nothing_added():
        """Run the job and verify no new objects were created."""
        runner.run_single_job(
            cmd, args, work, collectors, posts, [head], **config)
        current_count = helpers.count_contents_on_path(pcs_full.get_path())[0]
        assert before_object_count == current_count

    # Malformed trace file that ends in an unexpected way
    _mocked_stap_file = 'record_malformed.txt'
    run_and_check_nothing_added()

    # Malformed trace file that ends in another unexpected way
    _mocked_stap_file = 'record_malformed2.txt'
    run_and_check_nothing_added()

    # Simulate a failure of SystemTap itself (nonzero exit code)
    _mocked_stap_code = 1
    run_and_check_nothing_added()
def test_collect_time(monkeypatch, helpers, pcs_full, capsys):
    """Test collecting the profile using the time collector"""
    # Count the state before running the single job
    before_object_count = helpers.count_contents_on_path(
        pcs_full.get_path())[0]
    head = vcs.get_minor_version_info(vcs.get_minor_head())

    runner.run_single_job(["echo"], "", ["hello"], ["time"], [], [head])

    # Assert outputs
    out, err = capsys.readouterr()
    assert err == ''
    assert 'Successfully collected data from echo' in out

    # Assert that just one profile was created
    after_object_count = helpers.count_contents_on_path(pcs_full.get_path())[0]
    assert before_object_count + 1 == after_object_count

    # Assert the length first so an empty jobs directory fails with a clean
    # assertion error instead of an IndexError on profiles[0]
    profiles = os.listdir(os.path.join(pcs_full.get_path(), 'jobs'))
    assert len(profiles) == 1
    new_profile = profiles[0]
    assert new_profile.endswith(".perf")

    # Run the collection once more (a plain second run, not the error case)
    runner.run_single_job(["echo"], "", ["hello"], ["time"], [], [head])

    # Test running time collection when the collector raises an exception;
    # the error message must be propagated to stderr
    def collect_raising_exception(**kwargs):
        raise Exception("Something happened lol!")

    monkeypatch.setattr(
        "perun.collect.time.run.collect", collect_raising_exception)
    runner.run_single_job(["echo"], "", ["hello"], ["time"], [], [head])
    _, err = capsys.readouterr()
    assert 'Something happened lol!' in err
def test_collect_memory(capsys, helpers, pcs_full, memory_collect_job, memory_collect_no_debug_job):
    """Test collecting the profile using the memory collector"""
    # Fixme: Add check that the profile was correctly generated
    before_object_count = helpers.count_contents_on_path(pcs_full.path)[0]

    runner.run_single_job(*memory_collect_job)

    # Assert that nothing was removed and exactly one object was added
    after_object_count = helpers.count_contents_on_path(pcs_full.path)[0]
    assert before_object_count + 1 == after_object_count

    # Assert the length first so an empty jobs directory fails with a clean
    # assertion error instead of an IndexError on profiles[0]
    profiles = os.listdir(os.path.join(pcs_full.path, 'jobs'))
    assert len(profiles) == 1
    new_profile = profiles[0]
    assert new_profile.endswith(".perf")

    # Run the same job again with sampling and an excluded function; this
    # should produce a second, smaller profile
    cmd, args, _, colls, posts = memory_collect_job
    runner.run_single_job(cmd, args, ["hello"], colls, posts, **{
        'no_func': 'fun', 'sampling': 0.1
    })
    profiles = os.listdir(os.path.join(pcs_full.path, 'jobs'))
    assert len(profiles) == 2
    new_smaller_profile = [p for p in profiles if p != new_profile][0]
    assert new_smaller_profile.endswith(".perf")

    # Assert that nothing was removed
    after_second_object_count = helpers.count_contents_on_path(
        pcs_full.path)[0]
    assert after_object_count + 1 == after_second_object_count

    # Fixme: Add check that the profile was correctly generated
    # Collecting from a binary without debug info must fail gracefully:
    # no new objects, and a 'debug info' message on stderr
    runner.run_single_job(*memory_collect_no_debug_job)
    last_object_count = helpers.count_contents_on_path(pcs_full.path)[0]
    _, err = capsys.readouterr()
    assert after_second_object_count == last_object_count
    assert 'debug info' in err