Example #1
0
def extract_job_from_profile(profile):
    """Extracts information from profile about job, that was done to generate the profile.

    The reconstructed job carries the collector, the applied postprocessors
    and the command/workload/params triple from the profile header.

    Fixme: Add assert that profile is profile
    Arguments:
        profile(dict): dictionary with valid profile

    Returns:
        Job: job according to the profile informations
    """
    # 'in profile' is the idiomatic membership test (no .keys() needed)
    assert 'collector_info' in profile
    collector_record = profile['collector_info']
    collector = Unit(collector_record['name'], collector_record['params'])

    assert 'postprocessors' in profile
    # Comprehension instead of the manual append loop
    posts = [
        Unit(post['name'], post['params']) for post in profile['postprocessors']
    ]

    assert 'header' in profile
    # Look the header up once instead of three separate subscript chains
    header = profile['header']
    return Job(collector, posts, header['cmd'], header['workload'], header['params'])
Example #2
0
def test_collect_memory(capsys, helpers, pcs_full, memory_collect_job,
                        memory_collect_no_debug_job):
    """Test collecting the profile using the memory collector.

    Checks that: a plain memory-collect job stores exactly one new profile;
    a second parameterized run stores a second, distinct profile; a job
    without debug info stores nothing and reports 'debug info' on stderr;
    and running the collector directly honours the 'no_func'/'no_source'
    filtering parameters.
    """
    # Fixme: Add check that the profile was correctly generated
    before_object_count = helpers.count_contents_on_path(
        pcs_full.get_path())[0]
    head = vcs.get_minor_version_info(vcs.get_minor_head())
    # The job fixture is a tuple; append the list of minor versions to it
    memory_collect_job += ([head], )

    runner.run_single_job(*memory_collect_job)

    # Assert that nothing was removed
    after_object_count = helpers.count_contents_on_path(pcs_full.get_path())[0]
    assert before_object_count + 1 == after_object_count

    # Exactly one profile was stored in the 'jobs' directory
    profiles = os.listdir(os.path.join(pcs_full.get_path(), 'jobs'))
    new_profile = profiles[0]
    assert len(profiles) == 1
    assert new_profile.endswith(".perf")

    # Re-run the same job with a different workload and collector parameters
    cmd, args, _, colls, posts, _ = memory_collect_job
    runner.run_single_job(cmd, args, ["hello"], colls, posts, [head], **{
        'no_func': 'fun',
        'sampling': 0.1
    })

    # A second, distinct profile should now exist alongside the first
    profiles = os.listdir(os.path.join(pcs_full.get_path(), 'jobs'))
    new_smaller_profile = [p for p in profiles if p != new_profile][0]
    assert len(profiles) == 2
    assert new_smaller_profile.endswith(".perf")

    # Assert that nothing was removed
    after_second_object_count = helpers.count_contents_on_path(
        pcs_full.get_path())[0]
    assert after_object_count + 1 == after_second_object_count

    # Fixme: Add check that the profile was correctly generated

    # Without debug info no new profile is stored and an error mentioning
    # 'debug info' is reported on the captured error stream
    memory_collect_no_debug_job += ([head], )
    runner.run_single_job(*memory_collect_no_debug_job)
    last_object_count = helpers.count_contents_on_path(pcs_full.get_path())[0]
    _, err = capsys.readouterr()
    assert after_second_object_count == last_object_count
    assert 'debug info' in err

    # Run the collector directly: filtering out 'main' leaves 2 resources
    target_bin = memory_collect_job[0][0]
    collector_unit = Unit('memory', {'all': False, 'no_func': 'main'})
    job = Job('memory', [], str(target_bin), '', '')
    _, prof = runner.run_collector(collector_unit, job)

    assert len(list(query.all_resources_of(prof))) == 2

    # Filtering out the whole source file leaves no resources at all
    collector_unit = Unit('memory', {
        'all': False,
        'no_source': 'memory_collect_test.c'
    })
    job = Job('memory', [], str(target_bin), '', '')
    _, prof = runner.run_collector(collector_unit, job)

    assert len(list(query.all_resources_of(prof))) == 0
Example #3
0
def run_postprocessor_on_profile(prof, postprocessor_name,
                                 postprocessor_params):
    """Run the job of the postprocessor according to the given profile.

    The job is first reconstructed from the profile itself; the requested
    postprocessor is appended to the job's postprocessor list and executed.
    On success the postprocessed profile is stored in the pending jobs.

    Arguments:
        prof(dict): dictionary with profile informations
        postprocessor_name(str): name of the postprocessor that we are using
        postprocessor_params(dict): parameters for the postprocessor

    Returns:
        PostprocessStatus: status how the postprocessing went
    """
    perun_pcs = PCS(store.locate_perun_dir_on(os.getcwd()))
    job = profile.extract_job_from_profile(prof)
    unit = Unit(postprocessor_name, postprocessor_params)
    job.postprocessors.append(unit)

    status, new_profile = run_postprocessor(unit, job, prof)
    # Only store when postprocessing succeeded on a non-empty profile
    if status == PostprocessStatus.OK and prof:
        store_generated_profile(perun_pcs, new_profile, job)
    return status
Example #4
0
def run_postprocessor_on_profile(prof,
                                 postprocessor_name,
                                 postprocessor_params,
                                 skip_store=False):
    """Run the job of the postprocessor according to the given profile.

    Reconstructs the job from the profile, appends the requested
    postprocessor to the job's postprocessors and executes it; unless
    storing is skipped, the postprocessed profile is stored in the
    pending jobs.

    :param dict prof: dictionary with profile informations
    :param str postprocessor_name: name of the postprocessor that we are using
    :param dict postprocessor_params: parameters for the postprocessor
    :param bool skip_store: if set to true, then the profile will not be stored
    :returns (PostprocessStatus, dict): status how the postprocessing went and the postprocessed
        profile
    """
    job = profile.extract_job_from_profile(prof)
    unit = Unit(postprocessor_name, postprocessor_params)
    job.postprocessors.append(unit)

    status, new_profile = run_postprocessor(unit, job, prof)
    # Store only on success, with a non-empty profile, and when not skipped
    should_store = status == PostprocessStatus.OK and prof and not skip_store
    if should_store:
        store_generated_profile(new_profile, job)
    return status, new_profile
Example #5
0
def test_collect_memory_with_generator(pcs_full, memory_collect_job):
    """Tries to collect the memory with integer generators"""
    target_cmd = memory_collect_job[0][0]
    job = Job(Unit('memory', {}), [], target_cmd, '', '')
    generator = IntegerGenerator(job, 1, 3, 1)
    # The generator merges the workloads into a single collected profile
    collected = list(generator.generate(runner.run_collector))
    assert len(collected) == 1
Example #6
0
def test_file_generator():
    """Tests file generator"""
    job = Job(Unit('time', {}), [], 'wc', '-l', '')
    generator = TextfileGenerator(job, 2, 5)

    # Every generated run must succeed and yield a non-empty profile
    for status, prof in generator.generate(runner.run_collector):
        assert status == CollectStatus.OK
        assert prof
        assert len(prof['global']['resources']) > 0
Example #7
0
def test_string_generator():
    """Tests string generator"""
    job = Job(Unit('time', {}), [], 'echo', '', '')
    generator = StringGenerator(job, 10, 20, 1)

    # Every generated run must succeed and yield a non-empty profile
    for status, prof in generator.generate(runner.run_collector):
        assert status == CollectStatus.OK
        assert prof
        assert len(prof['global']['resources']) > 0
Example #8
0
def test_loading_generators_from_config(monkeypatch, pcs_full):
    """Tests loading generator specification from config"""
    # Job used to drive a generator built from the loaded specification
    time_collector = Unit('time', {})
    job = Job(time_collector, [], 'factor', '', '')

    # One valid generator in the local config
    local_workloads = [{
        'id': 'gen1',
        'type': 'integer',
        'min_range': 10,
        'max_range': 20,
        'step': 1
    }]
    # Global config: one valid generator plus two malformed specifications
    global_workloads = [{
        'id': 'gen2',
        'type': 'integer',
        'min_range': 100,
        'max_range': 200,
        'step': 10
    }, {
        'id': 'gen_incorrect',
        'min_range': 100
    }, {
        'id': 'gen_almost_correct',
        'type': 'bogus'
    }]
    local_cfg = config.Config('local', '',
                              {'generators': {'workload': local_workloads}})
    global_cfg = config.Config('global', '',
                               {'generators': {'workload': global_workloads}})
    monkeypatch.setattr("perun.logic.config.local", lambda _: local_cfg)
    monkeypatch.setattr("perun.logic.config.shared", lambda: global_cfg)

    # Only the two well-formed generators are loaded
    specifications = workload.load_generator_specifications()
    assert len(specifications.keys()) == 2
    assert 'gen1' in specifications.keys()
    assert 'gen2' in specifications.keys()
    assert 'gen_incorrect' not in specifications.keys()
    assert 'gen_almost_correct' not in specifications.keys()

    # Now test that the generators really work :P
    constructor, params = specifications['gen1']
    for status, prof in constructor(job, **params).generate(
            runner.run_collector):
        assert status == CollectStatus.OK
        assert prof
        assert len(prof['global']['resources'])
Example #9
0
    def construct_unit(unit, unit_type, ukwargs):
        """Helper function for constructing the {'name', 'params'} objects for collectors and posts.

        :param str unit: name of the unit (collector/postprocessor)
        :param str unit_type: name of the unit type (collector or postprocessor)
        :param dict ukwargs: dictionary of additional parameters
        :returns dict: dictionary of the form {'name', 'params'}
        """
        # Commandline params for this unit live under the '<type>_params' key
        params_by_unit = ukwargs.get(unit_type + "_params", {})
        return Unit(unit, params_by_unit.get(unit, {}))
Example #10
0
def test_singleton():
    """Tests singleton generator"""
    job = Job(Unit('time', {}), [], 'factor', '', '')
    generator = SingletonGenerator(job, "10")

    generated = list(generator.generate(runner.run_collector))
    for status, prof in generated:
        assert status == CollectStatus.OK
        assert prof
        assert len(prof['global']['resources']) > 0
    # A singleton workload runs exactly one job
    assert len(generated) == 1
Example #11
0
def test_integer_generator():
    """Tests generation of integers from given range"""
    job = Job(Unit('time', {}), [], 'factor', '', '')
    generator = IntegerGenerator(job, 10, 100, 10)

    # Every generated run must succeed and yield a non-empty profile
    for status, prof in generator.generate(runner.run_collector):
        assert status == CollectStatus.OK
        assert prof
        assert len(prof['global']['resources']) > 0

    # Try that the pure generator raises error
    base_generator = Generator(job)
    with pytest.raises(SystemExit):
        _ = list(base_generator.generate(runner.run_collector))
Example #12
0
def test_integer_generator_for_each():
    """Tests the profile_for_each_workload option"""
    collector_unit = Unit('time', {})
    job = Job(collector_unit, [], 'factor', '', '')

    # With profile_for_each_workload set, one profile is yielded per workload
    per_workload_generator = IntegerGenerator(
        job, 10, 100, 10, profile_for_each_workload=True)
    pairs = list(per_workload_generator.generate(runner.run_collector))
    assert len(pairs) == 10

    # With profile_for_each_workload unset, the resources are merged into
    # a single profile
    merging_generator = IntegerGenerator(
        job, 10, 100, 10, profile_for_each_workload=False)
    pairs = list(merging_generator.generate(runner.run_collector))
    assert len(pairs) == 1