Example #1
def test_degradation_between_profiles(pcs_with_degradations, capsys):
    """Set of basic tests for testing degradation between profiles

    Expects correct behaviour
    """
    pool_path = os.path.join(
        os.path.split(__file__)[0], 'degradation_profiles')
    profiles = [
        factory.load_profile_from_file(
            os.path.join(pool_path, 'linear_base.perf'), True),
        factory.load_profile_from_file(
            os.path.join(pool_path, 'linear_base_degradated.perf'), True),
        factory.load_profile_from_file(
            os.path.join(pool_path, 'quad_base.perf'), True),
        factory.load_profile_from_file(os.path.join(pool_path, 'zero.perf'),
                                       True)
    ]
    # Cannot detect degradation using the BMOE strategy between this pair of profiles,
    # since the best models are the same with good confidence
    result = list(bmoe.best_model_order_equality(profiles[0], profiles[1]))
    assert check.PerformanceChange.NoChange in [r.result for r in result]

    # Can detect degradation using the BMOE strategy between these pairs of profiles
    result = list(bmoe.best_model_order_equality(profiles[1], profiles[2]))
    assert check.PerformanceChange.Degradation in [r.result for r in result]

    result = list(bmoe.best_model_order_equality(profiles[0], profiles[2]))
    assert check.PerformanceChange.Degradation in [r.result for r in result]

    result = list(aat.average_amount_threshold(profiles[1], profiles[2]))
    assert check.PerformanceChange.Degradation in [r.result for r in result]

    # Can detect optimizations using both BMOE and AAT
    result = list(aat.average_amount_threshold(profiles[2], profiles[1]))
    assert check.PerformanceChange.Optimization in [r.result for r in result]

    result = list(bmoe.best_model_order_equality(profiles[2], profiles[1]))
    assert check.PerformanceChange.Optimization in [r.result for r in result]
    # Check that the confidence was printed
    deg_list = [(res, "", "") for res in result]
    log.print_list_of_degradations(deg_list)
    out, _ = capsys.readouterr()
    assert 'with confidence' in out

    # Check that nothing goes wrong when the average is 0.0
    result = list(aat.average_amount_threshold(profiles[3], profiles[3]))
    # Assert that a DegradationInfo was yielded
    assert result
    # Assert there was no change
    assert check.PerformanceChange.NoChange in [r.result for r in result]
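
The test above repeats one pattern: run a checker on a (baseline, target) pair and inspect the .result field of every yielded record. A distilled sketch of that pattern, reusing only names already shown (baseline and target stand for any two loaded profiles):

# Minimal check pattern used throughout this test (a sketch, not new API)
changes = [r.result for r in bmoe.best_model_order_equality(baseline, target)]
assert check.PerformanceChange.Degradation in changes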
Example #2
def get_single_profile_info(pcs, minor_version, profile_source):
    """Function for loading single performance profile info
    :param PCS pcs: object with performance control system wrapper
    :param str minor_version: commit to which the profiles belongs
    :param str profile_source: name of the performance profile
    :return: dictionary containing performance profile info
    """

    try:
        # Search the registered profiles first
        profiles_objs = commands.get_minor_version_profiles(pcs, minor_version)
        for profile_obj in profiles_objs:
            if profile_obj.source == profile_source:
                perf_profile = profile.load_profile_from_file(
                    profile_obj.realpath, is_raw_profile=False)
                options = list(query.all_resource_fields_of(perf_profile))
                numerical = list(query.all_numerical_resource_fields_of(perf_profile))
                dataframe = convert.resources_to_pandas_dataframe(perf_profile)

                # Drop resources that miss a value for any of the fields
                for option in options:
                    dataframe = dataframe[pandas.notnull(dataframe[option])]

                dataframe = dataframe.astype(str)
                resource_values = dataframe.to_dict(orient='records')

                formatted = formatter.format_single_profile_info(
                    profile_obj, minor_version, options, numerical, resource_values)

                return formatted, json.dumps({'profile': formatted})

        # Fall back to the untracked (pending) profiles, which are stored raw
        profiles_objs = commands.get_untracked_profiles(pcs)
        for profile_obj in profiles_objs:
            if profile_obj.source == profile_source:
                perf_profile = profile.load_profile_from_file(
                    profile_obj.realpath, is_raw_profile=True)
                options = list(query.all_resource_fields_of(perf_profile))
                numerical = list(query.all_numerical_resource_fields_of(perf_profile))
                dataframe = convert.resources_to_pandas_dataframe(perf_profile)

                # Drop resources that miss a value for any of the fields
                for option in options:
                    dataframe = dataframe[pandas.notnull(dataframe[option])]

                dataframe = dataframe.astype(str)
                resource_values = dataframe.to_dict(orient='records')

                formatted = formatter.format_single_profile_info(
                    profile_obj, minor_version, options, numerical, resource_values)

                return formatted, json.dumps({'profile': formatted})

        return create_response('Profile not found', 404)

    except Exception as e:
        eprint(e)
        return create_response(str(e), 404)
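
The per-column filtering loop above is a standard pandas idiom; a minimal self-contained sketch (illustrative data only) showing that it is equivalent to dropna with a subset:

import pandas

df = pandas.DataFrame({'amount': [1, None, 3], 'uid': ['a', 'b', None]})
for column in ('amount', 'uid'):
    df = df[pandas.notnull(df[column])]
# df now holds only the first row; df.dropna(subset=['amount', 'uid'])
# would yield the same frame in a single call.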
Example #3
def test_flame_graph(pcs_full, valid_profile_pool):
    """Test creating flame graph out of the memory profile

    Expecting no errors and a created flame.svg graph
    """
    runner = CliRunner()

    for valid_profile in valid_profile_pool:
        memory_profile = profiles.load_profile_from_file(valid_profile, is_raw_profile=True)
        if memory_profile['header']['type'] != 'memory':
            continue

        # First try to create the graph directly through the flamegraphs module
        flamegraphs.draw_flame_graph(memory_profile, 'flame2.svg', 20)
        assert 'flame2.svg' in os.listdir(os.getcwd())

        # Next try to create it using the click
        result = runner.invoke(cli.show, [valid_profile, 'flamegraph'])

        assert result.exit_code == 0
        assert 'flame.svg' in os.listdir(os.getcwd())

        # Read the contents and compare only the line counts (parts of the SVG
        # are randomized, so an exact comparison is not possible)
        with open('flame.svg', 'r') as f1:
            first_contents = f1.readlines()

        with open('flame2.svg', 'r') as f2:
            second_contents = f2.readlines()

        assert len(first_contents) == len(second_contents)
Example #4
def test_bars_cli(pcs_full, valid_profile_pool):
    """Test running and creating bokeh bar from the cli

    Expecting no errors and created bars.html file
    """
    runner = CliRunner()
    for valid_profile in valid_profile_pool:
        loaded_profile = profiles.load_profile_from_file(valid_profile,
                                                         is_raw_profile=True)
        if loaded_profile['header']['type'] != 'memory':
            continue

        # Test correct stacked
        result = runner.invoke(cli.show, [
            valid_profile, 'bars', '--of=amount', '--stacked', '--by=uid',
            '--filename=bars.html'
        ])
        assert result.exit_code == 0
        assert 'bars.html' in os.listdir(os.getcwd())

        # Test correct grouped
        result = runner.invoke(cli.show, [
            valid_profile, 'bars', '--of=amount', '--grouped', '--by=uid',
            '--filename=bars.html'
        ])
        assert result.exit_code == 0
        assert 'bars.html' in os.listdir(os.getcwd())
Example #5
def test_flow_cli(pcs_full, valid_profile_pool):
    """Test runing and creating bokeh flow from the cli

    Expecting no errors and created flow file
    """
    runner = CliRunner()
    for valid_profile in valid_profile_pool:
        loaded_profile = profiles.load_profile_from_file(valid_profile,
                                                         is_raw_profile=True)
        if loaded_profile['header']['type'] != 'memory':
            continue

        # Classical run --- will accumulate the values
        assert 'flow.html' not in os.listdir(os.getcwd())
        result = runner.invoke(cli.show, [
            valid_profile, 'flow', '--of=amount', '--by=uid', '--stacked',
            '--filename=flow.html'
        ])

        assert result.exit_code == 0
        assert 'flow.html' in os.listdir(os.getcwd())

        # Run without accumulation
        result = runner.invoke(cli.show, [
            valid_profile, 'flow', '--of=amount', '--by=uid', '--stacked',
            '--no-accumulate', '--filename=flow2.html', '--graph-title=Test'
        ])
        assert result.exit_code == 0
        assert 'flow2.html' in os.listdir(os.getcwd())
Example #6
def load_profile_from_args(profile_name, minor_version):
    """
    :param str profile_name: name (or SHA-1 checksum) of the profile to be loaded
    :param str minor_version: SHA-1 representation of the minor version
    :returns dict: loaded profile represented as dictionary (or None if not found)
    """
    # If we were given a profile name rather than a SHA-1 digest, look it up in the index
    if not store.is_sha1(profile_name):
        _, minor_index_file = store.split_object_name(
            pcs.get_object_directory(), minor_version)
        # If the index file does not exist yet (nothing was ever registered),
        #   we return nothing; otherwise we look up the matching entries in the index
        if not os.path.exists(minor_index_file):
            return None
        with open(minor_index_file, 'rb') as minor_handle:
            lookup_pred = lambda entry: entry.path == profile_name
            profiles = store.lookup_all_entries_within_index(
                minor_handle, lookup_pred)
    else:
        profiles = [profile_name]

    # If no profile was found, return None; otherwise choose the first one
    if not profiles:
        return None
    chosen_profile = profiles[0]

    # Peek at the profile type to verify the profile is well-formed, then load the JSON
    _, profile_name = store.split_object_name(pcs.get_object_directory(),
                                              chosen_profile.checksum)
    profile_type = store.peek_profile_type(profile_name)
    if profile_type == PROFILE_MALFORMED:
        perun_log.error("malformed profile {}".format(profile_name))
    loaded_profile = profile.load_profile_from_file(profile_name, False)

    return loaded_profile
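
A hypothetical call sketch (both arguments below are illustrative placeholders, not real values):

# 'my_run.perf' and the repeated-byte SHA-1 are made up for illustration
loaded = load_profile_from_args('my_run.perf', 'deadbeef' * 5)
if loaded is None:
    print('profile not registered for this minor version')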
Example #7
def load_all_profiles_in(directory):
    """Generates a stream of loaded profiles (i.e. dictionaries) found in the specified directory.

    Arguments:
        directory(str): the name (not path!) of the profile directory

    Returns:
        generator: stream of loaded profiles as tuples (profile_name, dictionary)
    """
    for profile in all_profiles_in(directory):
        yield (profile, perun_profile.load_profile_from_file(profile, True))
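
Usage sketch (the 'jobs' directory name is an assumption for illustration):

for name, profile_dict in load_all_profiles_in('jobs'):
    print(name, profile_dict['header']['type'])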
Example #8
def get_loaded_profiles(profile_type):
    """
    Arguments:
        profile_type(str): type of the profile we are looking for

    Returns:
        generator: stream of profiles of the given type
    """
    for valid_profile in filter(lambda p: 'err' not in p, all_profiles_in("to_add_profiles", True)):
        loaded_profile = perun_profile.load_profile_from_file(valid_profile, is_raw_profile=True)
        if loaded_profile['header']['type'] == profile_type:
            yield loaded_profile
Example #9
def test_bars_cli_errors(helpers, pcs_full, valid_profile_pool):
    """Test running and creating bokeh bars from the cli with error simulations

    Expecting errors, but nothing destructive
    """
    runner = CliRunner()
    for valid_profile in valid_profile_pool:
        loaded_profile = profiles.load_profile_from_file(valid_profile,
                                                         is_raw_profile=True)
        if loaded_profile['header']['type'] != 'memory':
            continue

        # Try a bogus --of parameter
        result = runner.invoke(
            cli.show,
            [valid_profile, 'bars', '--of=undefined', '--by=uid', '--stacked'])
        helpers.assert_invalid_cli_choice(result, 'undefined', 'bars.html')

        # Try a bogus aggregation function
        result = runner.invoke(cli.show, [
            valid_profile, 'bars', 'f', '--of=subtype', '--by=uid', '--stacked'
        ])
        helpers.assert_invalid_cli_choice(result, 'f', 'bars.html')

        # Try a bogus --per key
        result = runner.invoke(cli.show, [
            valid_profile, 'bars', '--of=subtype', '--by=uid', '--stacked',
            '--per=dolan'
        ])
        helpers.assert_invalid_cli_choice(result, 'dolan', 'bars.html')

        # Try a bogus --by key
        result = runner.invoke(cli.show, [
            valid_profile, 'bars', '--of=subtype', '--by=everything',
            '--stacked'
        ])
        helpers.assert_invalid_cli_choice(result, 'everything', 'bars.html')

        # Try an --of key that is not summable
        result = runner.invoke(
            cli.show,
            [valid_profile, 'bars', '--of=subtype', '--by=uid', '--stacked'])
        helpers.assert_invalid_param_choice(result, 'subtype', 'bars.html')

        # Try an --of key that is not summable but is countable
        for valid_func in ('count', 'nunique'):
            result = runner.invoke(cli.show, [
                valid_profile, 'bars', valid_func, '--of=subtype', '--by=uid',
                '--per=snapshots'
            ])
            assert result.exit_code == 0
            assert 'bars.html' in os.listdir(os.getcwd())
Example #10
def performance_profile_postprocess(repo_path, profile_realpath, specification, registered):
    """Function for postprocessing the given profile with a postprocessor given by specification
    :param str repo_path: path to repository
    :param str profile_realpath: path to profile
    :param dict specification: dictionary containging postprocessor specification
    :param bool registered: registration status of the profile
    :return: 200 OK if successfull, 404 NOT FOUND otherwise
    """
    original_path = os.getcwd()
    os.chdir(repo_path)

    try:
        perf_profile = profile.load_profile_from_file(profile_realpath, is_raw_profile=(not registered))
        
        name = ''
        arguments = ''

        if specification['name'].lower() == 'regression analysis':
            name = 'regression_analysis'
            arguments = {'method': '', 'regression_models': '', 'steps': '', 'of_key': '', 'per_key': ''}
            for param in specification['parameters']:
                if param['param'] == 'method':
                    arguments['method'] = param['options'][0]
                elif param['param'] == 'regression models':
                    arguments['regression_models'] = param['options']
                elif param['param'] == 'steps':
                    arguments['steps'] = param['options'][0]
                elif param['param'] == 'of':
                    arguments['of_key'] = param['options'][0]
                elif param['param'] == 'depending on':
                    arguments['per_key'] = param['options'][0]

        elif specification['name'].lower() == 'normalizer':
            arguments = {}
            name = 'normalizer'
        elif specification['name'].lower() == 'filter':
            arguments = {}
            name = 'filter'
        
        runner.run_postprocessor_on_profile(perf_profile, name, arguments)
        os.chdir(original_path)
        return create_response('Profile postprocessed successfully', 200)

    except Exception as e:
        os.chdir(original_path)
        eprint(e)
        return create_response(str(e), 404)
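
The chain of elif branches maps UI parameter names to runner argument keys; the same mapping as a table-driven sketch (identical behavior, parameter names taken from the code above):

PARAM_TO_KEY = {
    'method': 'method',
    'regression models': 'regression_models',
    'steps': 'steps',
    'of': 'of_key',
    'depending on': 'per_key',
}
for param in specification['parameters']:
    key = PARAM_TO_KEY.get(param['param'])
    if key == 'regression_models':
        arguments[key] = param['options']      # this parameter keeps the whole list
    elif key is not None:
        arguments[key] = param['options'][0]   # the rest take the first option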
Example #11
def profile_lookup_callback(ctx, _, value):
    """
    Arguments:
        ctx(click.core.Context): context
        _(click.core.Argument): param
        value(str): value of the profile parameter
    """
    # 0) First check if the value is tag or not
    index_tag_match = store.INDEX_TAG_REGEX.match(value)
    if index_tag_match:
        index_profile = commands.get_nth_profile_of(
            int(index_tag_match.group(1)), ctx.params['minor'])
        return profiles.load_profile_from_file(index_profile,
                                               is_raw_profile=False)

    pending_tag_match = store.PENDING_TAG_REGEX.match(value)
    if pending_tag_match:
        pending_profile = lookup_nth_pending_filename(
            int(pending_tag_match.group(1)))
        return profiles.load_profile_from_file(pending_profile,
                                               is_raw_profile=True)

    # 1) Check the index, if this is registered
    profile_from_index = commands.load_profile_from_args(
        value, ctx.params['minor'])
    if profile_from_index:
        return profile_from_index

    perun_log.info(
        "file '{}' not found in index. Checking filesystem...".format(value))
    # 2) Else lookup filenames and load the profile
    abs_path = lookup_profile_filename(value)
    if not os.path.exists(abs_path):
        perun_log.error("could not find the file '{}'".format(abs_path))

    return profiles.load_profile_from_file(abs_path, is_raw_profile=True)
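
The tag formats themselves are not shown in this excerpt; judging from Example 20, index tags look like '0@i'. A hedged sketch of what such patterns might be (assumptions, not the actual definitions in store):

import re

INDEX_TAG_REGEX = re.compile(r'^(\d+)@i$')    # assumed shape, matches tags such as '0@i'
PENDING_TAG_REGEX = re.compile(r'^(\d+)@p$')  # assumed by analogy; not confirmed here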
Example #12
def degradation_between_profiles(baseline_profile, target_profile):
    """Checks, between a pair of (baseline, target) profiles, whether a degradation can be detected

    We first find the suitable strategies for the profile configuration and then call the
    appropriate wrapper functions.

    :param ProfileInfo baseline_profile: baseline against which we are checking the degradation
    :param ProfileInfo target_profile: profile corresponding to the checked minor version
    :returns: generator of tuples (degradation result, degradation location, degradation rate)
    """
    if not isinstance(baseline_profile, dict):
        baseline_profile = profiles.load_profile_from_file(
            baseline_profile.realpath, False)
    if not isinstance(target_profile, dict):
        target_profile = profiles.load_profile_from_file(
            target_profile.realpath, False)

    # We run all of the degradation methods suitable for the given profile configuration
    for degradation_method in get_strategies_for(baseline_profile):
        yield from utils.dynamic_module_function_call('perun.check',
                                                      degradation_method,
                                                      degradation_method,
                                                      baseline_profile,
                                                      target_profile)
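
Usage sketch (baseline_info and target_info are hypothetical ProfileInfo variables):

for degradation in degradation_between_profiles(baseline_info, target_info):
    print(degradation)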
Example #13
def test_flow_cli_errors(helpers, pcs_full, valid_profile_pool):
    """Test running and creating bokeh flow from the cli with error simulations

    Expecting errors, but nothing destructive
    """
    runner = CliRunner()
    for valid_profile in valid_profile_pool:
        loaded_profile = profiles.load_profile_from_file(valid_profile,
                                                         is_raw_profile=True)
        if loaded_profile['header']['type'] != 'memory':
            continue

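        # Try a bogus --of parameter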
        result = runner.invoke(
            cli.show,
            [valid_profile, 'flow', '--of=undefined', '--by=uid', '--stacked'])
        helpers.assert_invalid_cli_choice(result, "undefined", 'flow.html')

        # Try a bogus aggregation function
        result = runner.invoke(cli.show, [
            valid_profile, 'flow', 'oracle', '--of=amount', '--by=uid',
            '--stacked'
        ])
        helpers.assert_invalid_cli_choice(result, 'oracle', 'flow.html')

        # Try a --through key that is not continuous
        result = runner.invoke(cli.show, [
            valid_profile, 'flow', '--of=amount', '--by=uid',
            '--through=subtype'
        ])
        helpers.assert_invalid_cli_choice(result, 'subtype', 'flow.html')

        # Try an --of key that is not summable
        result = runner.invoke(cli.show, [
            valid_profile, 'flow', '--of=subtype', '--by=uid',
            '--through=snapshots'
        ])
        helpers.assert_invalid_param_choice(result, 'subtype', 'flow.html')

        # Try an --of key that is not summable but is countable
        for valid_func in ('count', 'nunique'):
            result = runner.invoke(cli.show, [
                valid_profile, 'flow', valid_func, '--of=subtype', '--by=uid',
                '--through=snapshots'
            ])
            assert result.exit_code == 0
            assert 'flow.html' in os.listdir(os.getcwd())
Example #14
    def prepare_profile(dest_dir, profile, origin):
        """
        Arguments:
            dest_dir(str): destination of the prepared profile
            profile(str): name of the profile that is going to be stored in pending jobs
            origin(str): origin minor version for the given profile
        """
        # Copy the profile to the destination directory
        shutil.copy2(profile, dest_dir)

        # Prepare origin for the current version
        copied_filename = os.path.join(dest_dir, os.path.split(profile)[-1])
        copied_profile = perun_profile.load_profile_from_file(copied_filename, is_raw_profile=True)
        copied_profile['origin'] = origin
        perun_profile.store_profile_at(copied_profile, copied_filename)
        shutil.copystat(profile, copied_filename)
        return copied_filename
Example #15
    def prepare_profile(perun, profile, origin):
        """
        Arguments:
            perun(PCS): perun control system wrapper
            profile(str): name of the profile that is going to be stored in pending jobs
            origin(str): origin minor version for the given profile
        """
        # Copy the profile to the pending jobs directory
        dest_dir = perun.get_job_directory()
        shutil.copy2(profile, dest_dir)

        # Prepare origin for the current version
        copied_filename = os.path.join(dest_dir, os.path.split(profile)[-1])
        copied_profile = perun_profile.load_profile_from_file(
            copied_filename, is_raw_profile=True)
        copied_profile['origin'] = origin
        perun_profile.store_profile_at(copied_profile, copied_filename)
        shutil.copystat(profile, copied_filename)
        return copied_filename
Example #16
def test_strategies():
    """Set of basic tests for handling the strategies

    Expects correct behaviour
    """
    pool_path = os.path.join(
        os.path.split(__file__)[0], 'degradation_profiles')
    profile = factory.load_profile_from_file(
        os.path.join(pool_path, 'linear_base.perf'), True)
    rule = {
        'method': 'average_amount_threshold',
        'collector': 'complexity',
        'postprocessor': 'regression_analysis'
    }
    assert check.is_rule_applicable_for(rule, profile)

    rule = {
        'method': 'average_amount_threshold',
        'postprocessor': 'regression_analysis',
        'collector': 'complexity'
    }
    assert check.is_rule_applicable_for(rule, profile)

    rule = {
        'method': 'average_amount_threshold',
        'postprocessor': 'regression_analysis',
        'collector': 'memory'
    }
    assert not check.is_rule_applicable_for(rule, profile)

    rule = {
        'method': 'average_amount_threshold',
        'collector': 'complexity',
        'postprocessor': 'filter'
    }
    assert not check.is_rule_applicable_for(rule, profile)

    rule = {
        'method': 'average_amount_threshold',
        'collector': 'complexity',
        'cmd': 'bogus'
    }
    assert not check.is_rule_applicable_for(rule, profile)
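
Judging from these assertions, a rule applies only when every key it names matches the profile's configuration. A hedged re-implementation sketch of that semantics (an assumption consistent with the asserts above, not the actual code of check.is_rule_applicable_for):

def rule_applies(rule, profile_config):
    # Hypothetical sketch: every non-'method' key of the rule must match
    # the corresponding entry of the profile configuration
    return all(profile_config.get(key) == value
               for key, value in rule.items() if key != 'method')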
Example #17
def get_single_minor_version_profiles(pcs, minor_version, branch_name, repo_path):
    """Function for loading performance profiles of a single minor version
    :param PCS pcs: object with performance control system wrapper
    :param str minor_version: minor version SHA
    :param str branch_name: name of the branch the minor version belongs to
    :param str repo_path: path to the repository
    :return: list of performance profiles
    """
    output = []
    output_objs = []

    try:
        # Registered profiles
        profiles_objs = commands.get_minor_version_profiles(pcs, minor_version)

        if not profiles_objs:
            os.chdir(repo_path)

        for profile_obj in profiles_objs:
            output.append(formatter.format_profiles(profile_obj, minor_version, True, 'remove', branch_name))
            output_objs.append(profile_obj)

        # Pending profiles
        profiles_objs = commands.get_untracked_profiles(pcs)

        if not profiles_objs:
            os.chdir(repo_path)

        for profile_obj in profiles_objs:
            unpacked_profile = profile.load_profile_from_file(profile_obj.realpath, is_raw_profile=True)
            if unpacked_profile['origin'] == minor_version:
                output.append(formatter.format_profiles(profile_obj, minor_version, False, 'register', branch_name))
                output_objs.append(profile_obj)

        return output_objs, output, json.dumps({'profiles': output})

    except Exception as e:
        eprint(e)
        return '', '', create_response(str(e), 404)
Example #18
def add(profile_names, minor_version, keep_profile=False):
    """Appends the profiles of @p profile_names to the @p minor_version inside the @p pcs

    :param generator profile_names: generator of profiles that will be stored for the minor version
    :param str minor_version: SHA-1 representation of the minor version
    :param bool keep_profile: if true, the profile that is about to be added will not be
        deleted and will be kept as it is; false by default, i.e. the profile is deleted
    """
    added_profile_count = 0
    # Materialize the generator first, so that we can both iterate it and take its len() below
    profile_names = list(profile_names)
    for profile_name in profile_names:
        # Test if the given profile exists (This should hold always, or not?)
        if not os.path.exists(profile_name):
            perun_log.error("{} does not exists".format(profile_name),
                            recoverable=True)
            continue

        # Load the profile content and unpack it to the JSON representation
        unpacked_profile = profile.load_profile_from_file(profile_name, True)

        if unpacked_profile['origin'] != minor_version:
            error_msg = "cannot add profile '{}' to minor index of '{}':".format(
                profile_name, minor_version)
            error_msg += "profile originates from minor version '{}'".format(
                unpacked_profile['origin'])
            perun_log.error(error_msg, recoverable=True)
            continue

        # Remove origin from file
        unpacked_profile.pop('origin')
        profile_content = profile.to_string(unpacked_profile)

        # Append header to the content of the file
        header = "profile {} {}\0".format(unpacked_profile['header']['type'],
                                          len(profile_content))
        profile_content = (header + profile_content).encode('utf-8')

        # Transform to internal representation - file as sha1 checksum and content packed with zlib
        profile_sum = store.compute_checksum(profile_content)
        compressed_content = store.pack_content(profile_content)

        # Add to control
        object_dir = pcs.get_object_directory()
        store.add_loose_object_to_dir(object_dir, profile_sum,
                                      compressed_content)

        # Register in the minor_version index
        store.register_in_index(object_dir, minor_version, profile_name,
                                profile_sum)

        # Remove the file
        if not keep_profile:
            os.remove(profile_name)

        added_profile_count += 1

    profile_names_len = len(profile_names)
    if added_profile_count != profile_names_len:
        perun_log.error(
            "only {}/{} profiles were successfully registered in index".format(
                added_profile_count, profile_names_len))
    perun_log.info("successfully registered {} profiles in index".format(
        added_profile_count))
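
For orientation, a minimal standard-library sketch of the pack-and-checksum step, assuming store.compute_checksum is a SHA-1 digest of the raw bytes and store.pack_content is zlib compression (a git-like scheme; an assumption, not confirmed by this excerpt):

import hashlib
import zlib

content = b"profile memory 42\0{...}"            # illustrative payload only
profile_sum = hashlib.sha1(content).hexdigest()  # assumed analogue of store.compute_checksum
compressed = zlib.compress(content)              # assumed analogue of store.pack_content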
Example #19
def test_degradation_with_method(pcs_with_degradations, capsys):
    """Set of basic tests for testing degradation between profiles

    Expects correct behaviour
    """

    # Load the profiles: four measurements for each model kind
    pool_path = os.path.join(os.path.split(__file__)[0], 'degradation_profiles')
    profiles = [
        factory.load_profile_from_file(
            os.path.join(pool_path, '{}{}.perf'.format(kind, num)), True)
        for kind in ('const', 'lin', 'log', 'quad', 'pow', 'exp')
        for num in range(1, 5)
    ]

    # CONSTANT MODEL -------------------------------------------- CONSTANT MODEL

    # CONSTANT ERROR
    result = list(lreg.linear_regression(profiles[0], profiles[1]))
    assert PerformanceChange.Degradation in [r.result for r in result]
    assert 'constant' in [r.type for r in result]
    assert 999 in [round(r.rate_degradation) for r in result]

    # CONSTANT IMPROVEMENT
    result = list(lreg.linear_regression(profiles[1], profiles[0]))
    assert PerformanceChange.Optimization in [r.result for r in result]
    assert 'constant' in [r.type for r in result]
    assert -91 in [round(r.rate_degradation) for r in result]

    # LINEAR ERROR
    result = list(lreg.linear_regression(profiles[0], profiles[2]))
    assert PerformanceChange.Degradation in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 5993 in [round(r.rate_degradation) for r in result]

    # LINEAR IMPROVEMENT
    result = list(lreg.linear_regression(profiles[2], profiles[0]))
    assert PerformanceChange.Optimization in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert -98 in [round(r.rate_degradation) for r in result]

    # QUADRATIC ERROR
    result = list(lreg.linear_regression(profiles[0], profiles[3]))
    assert PerformanceChange.Degradation in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 966206052007956736 in [round(r.rate_degradation) for r in result]

    # QUADRATIC IMPROVEMENT
    result = list(lreg.linear_regression(profiles[3], profiles[0]))
    assert PerformanceChange.Optimization in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert -98 in [round(r.rate_degradation) for r in result]

    # LINEAR MODEL -------------------------------------------- LINEAR MODEL

    # CONSTANT ERROR
    result = list(lreg.linear_regression(profiles[4], profiles[5]))
    assert PerformanceChange.Degradation in [r.result for r in result]
    assert 'constant' in [r.type for r in result]
    assert 55 in [round(r.rate_degradation) for r in result]

    # CONSTANT IMPROVEMENT
    result = list(lreg.linear_regression(profiles[5], profiles[4]))
    assert PerformanceChange.MaybeOptimization in [r.result for r in result]
    assert 'constant' in [r.type for r in result]
    assert -24 in [round(r.rate_degradation) for r in result]

    # LINEAR ERROR
    result = list(lreg.linear_regression(profiles[4], profiles[6]))
    assert PerformanceChange.MaybeDegradation in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 20 in [round(r.rate_degradation) for r in result]

    # LINEAR IMPROVEMENT
    result = list(lreg.linear_regression(profiles[6], profiles[4]))
    assert PerformanceChange.MaybeOptimization in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert -17 in [round(r.rate_degradation) for r in result]

    # QUADRATIC ERROR
    result = list(lreg.linear_regression(profiles[4], profiles[7]))
    assert PerformanceChange.MaybeDegradation in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 7 in [round(r.rate_degradation) for r in result]

    # QUADRATIC IMPROVEMENT
    result = list(lreg.linear_regression(profiles[7], profiles[4]))
    assert PerformanceChange.Optimization in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert -33 in [round(r.rate_degradation) for r in result]

    # LOGARITHMIC MODEL -------------------------------------------- LOGARITHMIC MODEL

    # CONSTANT ERROR
    result = list(lreg.linear_regression(profiles[8], profiles[9]))
    assert PerformanceChange.NoChange in [r.result for r in result]

    # CONSTANT IMPROVEMENT
    result = list(lreg.linear_regression(profiles[9], profiles[8]))
    assert PerformanceChange.NoChange in [r.result for r in result]

    # LINEAR ERROR
    result = list(lreg.linear_regression(profiles[8], profiles[10]))
    assert PerformanceChange.MaybeDegradation in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 20 in [round(r.rate_degradation) for r in result]

    # LINEAR IMPROVEMENT
    result = list(lreg.linear_regression(profiles[10], profiles[8]))
    assert PerformanceChange.MaybeOptimization in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert -17 in [round(r.rate_degradation) for r in result]

    # QUADRATIC ERROR
    result = list(lreg.linear_regression(profiles[8], profiles[11]))
    assert PerformanceChange.Degradation in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 36 in [round(r.rate_degradation) for r in result]

    # QUADRATIC IMPROVEMENT
    result = list(lreg.linear_regression(profiles[11], profiles[8]))
    assert PerformanceChange.MaybeOptimization in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 57 in [round(r.rate_degradation) for r in result]

    # QUADRATIC MODEL -------------------------------------------- QUADRATIC MODEL

    # CONSTANT ERROR
    result = list(lreg.linear_regression(profiles[12], profiles[13]))
    assert PerformanceChange.Degradation in [r.result for r in result]
    assert 'constant' in [r.type for r in result]
    assert 27 in [round(r.rate_degradation) for r in result]

    # CONSTANT IMPROVEMENT
    result = list(lreg.linear_regression(profiles[13], profiles[12]))
    assert PerformanceChange.MaybeOptimization in [r.result for r in result]
    assert 'constant' in [r.type for r in result]
    assert -21 in [round(r.rate_degradation) for r in result]

    # LINEAR ERROR
    result = list(lreg.linear_regression(profiles[12], profiles[14]))
    assert PerformanceChange.MaybeDegradation in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 19 in [round(r.rate_degradation) for r in result]

    # LINEAR IMPROVEMENT
    result = list(lreg.linear_regression(profiles[14], profiles[12]))
    assert PerformanceChange.MaybeOptimization in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert -16 in [round(r.rate_degradation) for r in result]

    # QUADRATIC ERROR
    result = list(lreg.linear_regression(profiles[12], profiles[15]))
    assert PerformanceChange.Degradation in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 43 in [round(r.rate_degradation) for r in result]

    # QUADRATIC IMPROVEMENT
    result = list(lreg.linear_regression(profiles[15], profiles[12]))
    assert PerformanceChange.Optimization in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert -28 in [round(r.rate_degradation) for r in result]

    # POWER MODEL -------------------------------------------- POWER MODEL

    # CONSTANT ERROR
    result = list(lreg.linear_regression(profiles[16], profiles[17]))
    assert PerformanceChange.Degradation in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 5219 in [round(r.rate_degradation) for r in result]

    # CONSTANT IMPROVEMENT
    result = list(lreg.linear_regression(profiles[17], profiles[16]))
    assert PerformanceChange.Optimization in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert -98 in [round(r.rate_degradation) for r in result]

    # LINEAR ERROR
    result = list(lreg.linear_regression(profiles[16], profiles[18]))
    assert PerformanceChange.NoChange in [r.result for r in result]

    # LINEAR IMPROVEMENT
    result = list(lreg.linear_regression(profiles[18], profiles[16]))
    assert PerformanceChange.NoChange in [r.result for r in result]

    # QUADRATIC ERROR
    result = list(lreg.linear_regression(profiles[19], profiles[16]))
    assert PerformanceChange.Degradation in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 14016 in [round(r.rate_degradation) for r in result]

    # QUADRATIC IMPROVEMENT
    result = list(lreg.linear_regression(profiles[16], profiles[19]))
    assert PerformanceChange.Optimization in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert -99 in [round(r.rate_degradation) for r in result]

    # EXPONENTIAL MODEL -------------------------------------------- EXPONENTIAL MODEL

    # CONSTANT ERROR
    result = list(lreg.linear_regression(profiles[20], profiles[21]))
    assert PerformanceChange.Degradation in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 38 in [round(r.rate_degradation) for r in result]

    # CONSTANT IMPROVEMENT
    result = list(lreg.linear_regression(profiles[21], profiles[20]))
    assert PerformanceChange.MaybeOptimization in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert -24 in [round(r.rate_degradation) for r in result]

    # LINEAR ERROR
    result = list(lreg.linear_regression(profiles[20], profiles[22]))
    assert PerformanceChange.MaybeDegradation in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 21 in [round(r.rate_degradation) for r in result]

    # LINEAR IMPROVEMENT
    result = list(lreg.linear_regression(profiles[22], profiles[20]))
    assert PerformanceChange.MaybeOptimization in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert -18 in [round(r.rate_degradation) for r in result]

    # QUADRATIC ERROR
    result = list(lreg.linear_regression(profiles[20], profiles[23]))
    assert PerformanceChange.Degradation in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert 44 in [round(r.rate_degradation) for r in result]

    # QUADRATIC IMPROVEMENT
    result = list(lreg.linear_regression(profiles[23], profiles[20]))
    assert PerformanceChange.Optimization in [r.result for r in result]
    assert 'linear' in [r.type for r in result]
    assert -29 in [round(r.rate_degradation) for r in result]
Example #20
def test_sliding_window(pcs_full):
    """Tests sliding window method"""
    runner = CliRunner()
    result = runner.invoke(cli.postprocessby,
                           ["0@i", "clusterizer", "-s", "sliding_window"])
    assert result.exit_code == 0

    pool_path = os.path.join(os.path.split(__file__)[0], 'clustering_profiles')
    clustered_profile = factory.load_profile_from_file(
        os.path.join(pool_path, 'clustering-workload.perf'), True)

    postprocessed_profile = copy.deepcopy(clustered_profile)
    params = {
        'window_width': 2,
        'width_measure': 'absolute',
        'window_height': 4,
        'height_measure': 'absolute'
    }
    clusterizer.postprocess(postprocessed_profile, 'sliding_window', **params)
    malloced = get_malloced_resources(postprocessed_profile)
    # Assert we clustered the resources into five clusters only
    assert max(res['cluster'] for res in malloced) == 5

    postprocessed_profile = copy.deepcopy(clustered_profile)
    params = {
        'window_width': 20,
        'width_measure': 'absolute',
        'window_height': 40,
        'height_measure': 'absolute'
    }
    clusterizer.postprocess(postprocessed_profile, 'sliding_window', **params)
    malloced = get_malloced_resources(postprocessed_profile)
    # Assert we clustered the resources into one cluster only, because the window is big
    assert max(res['cluster'] for res in malloced) == 1

    postprocessed_profile = copy.deepcopy(clustered_profile)
    params = {
        'window_width': 0.5,
        'width_measure': 'relative',
        'window_height': 40,
        'height_measure': 'absolute'
    }
    clusterizer.postprocess(postprocessed_profile, 'sliding_window', **params)
    malloced = get_malloced_resources(postprocessed_profile)
    # Assert we clustered the resources into two clusters only
    assert max(res['cluster'] for res in malloced) == 2

    # Try nonexistent or unsupported options
    with pytest.raises(SystemExit):
        params = {
            'window_width': 0.5,
            'width_measure': 'weighted',
            'window_height': 40,
            'height_measure': 'absolute'
        }
        clusterizer.postprocess(postprocessed_profile, 'sliding_window',
                                **params)

    with pytest.raises(SystemExit):
        params = {
            'window_width': 0.5,
            'width_measure': 'nonexistant',
            'window_height': 40,
            'height_measure': 'absolute'
        }
        clusterizer.postprocess(postprocessed_profile, 'sliding_window',
                                **params)

    with pytest.raises(SystemExit):
        params = {
            'window_width': 0.5,
            'width_measure': 'absolute',
            'window_height': 40,
            'height_measure': 'nonexistant'
        }
        clusterizer.postprocess(postprocessed_profile, 'sliding_window',
                                **params)
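
The three failing configurations above differ only in one field; a parametrized sketch of the same checks (pytest.mark.parametrize is standard pytest; the profile fixture name below is hypothetical):

import pytest

@pytest.mark.parametrize('params', [
    {'window_width': 0.5, 'width_measure': 'weighted', 'window_height': 40, 'height_measure': 'absolute'},
    {'window_width': 0.5, 'width_measure': 'nonexistant', 'window_height': 40, 'height_measure': 'absolute'},
    {'window_width': 0.5, 'width_measure': 'absolute', 'window_height': 40, 'height_measure': 'nonexistant'},
])
def test_sliding_window_rejects_bad_measures(clustered_profile, params):
    # clustered_profile is an assumed fixture providing a loaded profile
    with pytest.raises(SystemExit):
        clusterizer.postprocess(clustered_profile, 'sliding_window', **params)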