Example #1
def get_strategies_for(profile):
    """Retrieves the best strategy for the given profile configuration

    :param ProfileInfo profile: Profile information with configuration tuple
    :return: method to be used for checking degradation between profiles of
        the same configuration type
    """
    # Retrieve the application strategy
    try:
        application_strategy = config.lookup_key_recursively(
            'degradation.apply')
    except exceptions.MissingConfigSectionException:
        log.error(
            "'degradation.apply' could not be found in any configuration\n"
            "Run either 'perun config --local edit' or 'perun config --shared edit' and set "
            " the 'degradation.apply' to suitable value (either 'first' or 'all')."
        )

    # Retrieve all of the strategies from configuration
    strategies = config.gather_key_recursively('degradation.strategies')
    already_applied_strategies = []
    first_applied = False
    for strategy in strategies:
        if (application_strategy == 'all' or not first_applied) \
                and is_rule_applicable_for(strategy, profile) \
                and 'method' in strategy.keys():
            # Compare the parsed method names, so that aliases do not bypass the dedup check
            method = parse_strategy(strategy['method'])
            if method not in already_applied_strategies:
                first_applied = True
                already_applied_strategies.append(method)
                yield method
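
A minimal consumption sketch (hypothetical caller; assumes an already-loaded
ProfileInfo object named profile_info):

# Iterate the degradation checks applicable to the profile configuration:
for method in get_strategies_for(profile_info):
    print("will apply degradation check:", method)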
Example #2
def compute_window_width(_, window_width, width_measure, resource_number):
    """Computes the sliding window width for the next cluster

    The computation of the new width of the cluster depends on the used measure,
    which can be either relative, absolute or weighted. The absolute size of the window
    always takes the same number of similar values; the relative size takes the number
    relative (i.e. the percentage) to the size of the resources; while the weighted size
    takes the number of resources w.r.t. the frequency of their values (i.e. the more
    common amounts are clustered in a finer way).

    :param dict _: currently processed resource, at this moment unused
    :param float window_width: the rate of the window width, dependent on width_measure,
        either percents or absolute number
    :param str width_measure: type of the width measure (absolute, relative or weighted)
    :param int resource_number: number of resources
    :return: computed width for new sliding window
    """
    if width_measure == 'absolute':
        return window_width
    elif width_measure == 'relative':
        return resource_number * window_width
    elif width_measure == 'weighted':
        log.error("'weighted' window width measure is not supported yet")
    else:
        log.error("unsupported window width measure: '{}'".format(width_measure))
Example #3
def run_collector(collector, job):
    """Run the job of collector of the given name.

    Tries to look up the module containing the collector specified by the
    collector name, and then runs it with the parameters and returns collected profile.

    Arguments:
        collector(Unit): object representing the collector
        job(Job): additional information about the running job

    Returns:
        (int, dict): status of the collection, generated profile
    """
    log.print_current_phase("Collecting data by {}", collector.name,
                            COLLECT_PHASE_COLLECT)

    try:
        collector_module = get_module('perun.collect.{0}.run'.format(
            collector.name))
    except ImportError:
        err_msg = "{} does not exist".format(collector.name)
        log.error(err_msg, recoverable=True)
        return CollectStatus.ERROR, {}

    # First init the collector by running its before phases (if it has any)
    job_params = utils.merge_dictionaries(job._asdict(), collector.params)
    collection_status, collection_msg, prof \
        = run_all_phases_for(collector_module, 'collector', job_params)

    if collection_status != CollectStatus.OK:
        log.error(collection_msg, recoverable=True)
    else:
        print("Successfully collected data from {}".format(job.cmd))

    return collection_status, prof
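
A minimal calling sketch (hypothetical; collector_unit and current_job stand in
for already-constructed Unit and Job instances):

status, prof = run_collector(collector_unit, current_job)
if status == CollectStatus.OK:
    print("profile collected successfully")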
Example #4
def run_postprocessor(postprocessor, job, prof):
    """Run the job of postprocess of the given name.

    Tries to look up the module containing the postprocessor specified by the
    postprocessor name, and then runs it with the parameters and returns processed
    profile.

    :param Unit postprocessor: object representing the postprocessor
    :param Job job: additional information about the running job
    :param dict prof: dictionary with profile
    :returns (int, dict): status of the collection, postprocessed profile
    """
    log.print_current_phase("Postprocessing data with {}", postprocessor.name,
                            COLLECT_PHASE_POSTPROCESS)

    try:
        postprocessor_module = get_module('perun.postprocess.{0}.run'.format(
            postprocessor.name))
    except ImportError:
        err_msg = "{} does not exist".format(postprocessor.name)
        log.error(err_msg, recoverable=True)
        return PostprocessStatus.ERROR, {}

    # First init the postprocessor by running its before phases (if it has any)
    job_params = utils.merge_dict_range(job._asdict(), {'profile': prof},
                                        postprocessor.params)
    post_status, post_msg, prof \
        = run_all_phases_for(postprocessor_module, 'postprocessor', job_params)

    if post_status != PostprocessStatus.OK:
        log.error(post_msg)
    else:
        print("Successfully postprocessed data by {}".format(postprocessor.name))

    return post_status, prof
Example #5
def lookup_shared_config_dir():
    """Performs a lookup of the shared config dir on the given platform.

    First we check if the PERUN_CONFIG_DIR environment variable is set; otherwise, we try to expand
    the home directory of the user and return the sane location according to the platform.

    On Windows systems, we use the AppData\\Local\\perun directory in the user space, on Linux
    systems we use ~/.config/perun. Other platforms are not supported, however they can be
    configured using PERUN_CONFIG_DIR.

    :returns: dir, where the shared config will be stored
    """
    environment_dir = os.environ.get('PERUN_CONFIG_DIR')
    if environment_dir:
        return environment_dir

    home_directory = os.path.expanduser("~")

    if sys.platform == 'win32':
        perun_config_dir = os.path.join(home_directory, 'AppData', 'Local',
                                        'perun')
    elif sys.platform == 'linux':
        perun_config_dir = os.path.join(home_directory, '.config', 'perun')
    else:
        err_msg = "{} platform is currently unsupported.\n\n".format(
            sys.platform)
        err_msg += "Set `PERUN_CONFIG_DIR` environment variable to a valid directory," \
                   "where the global config will be stored and rerun the command."
        perun_log.error(err_msg)

    store.touch_dir_range(home_directory, perun_config_dir)
    return perun_config_dir
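
A small sketch of the environment-variable override (the directory is hypothetical):

import os

# PERUN_CONFIG_DIR takes precedence over the platform-specific defaults:
os.environ['PERUN_CONFIG_DIR'] = '/tmp/my-perun-config'
assert lookup_shared_config_dir() == '/tmp/my-perun-config'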
Example #6
def print_formating_token(fmt_string,
                          info_object,
                          info_attr,
                          size_limit,
                          default_color='white',
                          value_fill=' '):
    """Prints the token from the fmt_string, according to the values stored in info_object

    info_attr is one of the tokens from fmt_string, which is extracted from the info_object,
    that stores the real value. This value is then output to stdout with colours, fills,
    and is trimmed to the given size.

    :param str fmt_string: formatting string for the given token
    :param object info_object: object with stored information (ProfileInfo or MinorVersion)
    :param str info_attr: attribute we are looking up in the info_object
    :param int size_limit: limits the output of the value of the info_object to this size
    :param str default_color: default colour of the formatting token that will be printed out
    :param char value_fill: character used to pad the value up to the size_limit
    """
    # Check if we encountered an incorrect token in the formatting string
    if not hasattr(info_object, info_attr):
        perun_log.error("invalid formatting string '{}': "
                        "object does not contain '{}' attribute".format(
                            fmt_string, info_attr))

    # Obtain the value for the printing
    raw_value = getattr(info_object, info_attr)
    info_value = raw_value[:size_limit].ljust(size_limit, value_fill)

    # Print the actual token
    if info_attr == 'type':
        cprint("[{}]".format(info_value), PROFILE_TYPE_COLOURS[raw_value])
    else:
        cprint(info_value, default_color)
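
A minimal sketch with a stand-in object (FakeInfo is hypothetical, and 'time' is
assumed to be a key of PROFILE_TYPE_COLOURS):

from collections import namedtuple

FakeInfo = namedtuple('FakeInfo', 'type')
# prints "[time    ]" coloured according to PROFILE_TYPE_COLOURS['time']:
print_formating_token('%type%', FakeInfo('time'), 'type', 8)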
Example #7
def _parse_commit(commit):
    """
    Arguments:
        commit(git.Commit): commit object

    Returns:
        MinorVersion: namedtuple of minor version (date author email checksum desc parents)
    """
    checksum = str(commit)
    commit_parents = [str(parent) for parent in commit.parents]

    commit_author_info = commit.author
    if not commit_author_info:
        perun_log.error("fatal: malformed commit {}".format(checksum))

    author, email = commit_author_info.name, commit_author_info.email
    timestamp = commit.committed_date
    date = timestamps.timestamp_to_str(int(timestamp))

    commit_description = str(commit.message)
    if not commit_description:
        perun_log.error("fatal: malformed commit {}".format(checksum))

    return MinorVersion(date, author, email, checksum, commit_description,
                        commit_parents)
Example #8
def run_prephase_commands(phase, phase_colour='white'):
    """Runs the phase before the actual collection of the methods

    This command first retrieves the phase from the configuration, and then
    safely runs all of the commands specified in the list.

    The phase is specified in :doc:`config` by keys specified in section
    :cunit:`execute`.

    :param str phase: name of the phase commands
    :param str phase_colour: colour for the printed phase
    """
    phase_key = ".".join(["execute", phase
                          ]) if not phase.startswith('execute') else phase
    cmds = pcs.local_config().safe_get(phase_key, [])
    if cmds:
        log.cprint("Running '{}' phase".format(phase), phase_colour)
        print("")
        try:
            utils.run_safely_list_of_commands(cmds)
        except subprocess.CalledProcessError as exception:
            error_command = str(exception.cmd)
            error_code = exception.returncode
            error_output = exception.output
            log.error(
                "error in {} phase: running '{}' exited with {} ({})".format(
                    phase, error_command, error_code, error_output))
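
A usage sketch (the 'pre_collect' phase name is illustrative; it assumes
corresponding commands are listed under the execute section of the configuration):

# Runs every command listed under 'execute.pre_collect' in the local config:
run_prephase_commands('pre_collect')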
Example #9
def process_bokeh_axis_title(ctx, param, value):
    """Processes default value for axes.

    If the value supplied from CLI is non-None, it is returned as it is. Otherwise, we try to
    create some optimal axis name. We do this according to the already processed parameters and
    we either use 'per_key' or 'of_key'.

    Arguments:
        ctx(click.Context): called context of the process
        param(click.Option): called option (either x or y axis)
        value(object): given value for the option param

    Returns:
        object: either value (if it is non-None) or default legend for given axis
    """
    if value:
        return value
    elif param.human_readable_name.startswith('x'):
        if 'per_key' in ctx.params.keys():
            return ctx.params['per_key']
        elif 'through_key' in ctx.params.keys():
            return ctx.params['through_key']
        else:
            log.error("internal perun error")
    elif param.human_readable_name.startswith('y'):
        return ctx.params['of_key']
    else:
        log.error("internal perun error")
Example #10
def degradation_between_files(baseline_file, target_file, minor_version):
    """Checks between pair of files (baseline, target) whether there are any changes in performance.

    :param dict baseline_file: baseline profile we are checking against
    :param dict target_file: target profile we are testing
    :param str minor_version: target minor_version
    """
    # First check if the configurations are compatible
    baseline_config = profiles.to_config_tuple(baseline_file)
    target_config = profiles.to_config_tuple(target_file)
    target_minor_version = target_file.get('origin', minor_version)
    if baseline_config != target_config:
        log.error(
            "incompatible configurations '{}' and '{}'".format(
                baseline_config, target_config) +
            "\n\nPerformance check does not make sense for profiles collected in different ways!"
        )

    detected_changes = [
        (deg, profiles.config_tuple_to_cmdstr(baseline_config),
         target_minor_version)
        for deg in degradation_between_profiles(baseline_file, target_file)
        if deg.result != PerformanceChange.NoChange
    ]

    # Store the detected changes for given minor version
    store.save_degradation_list_for(pcs.get_object_directory(),
                                    target_minor_version, detected_changes)
    print("")
    log.print_list_of_degradations(detected_changes)
    log.print_short_summary_of_degradations(detected_changes)
Example #11
def log(head, **kwargs):
    """Shows history of versions and associated profiles.

    Shows the history of the wrapped version control system and all of the
    associated profiles starting from the <hash> point, outputting the
    information about the number of profiles, about descriptions of concrete
    minor versions, their parents, parents of parents, etc.

    If ``perun log --short`` is issued, the shorter version of the ``log`` is
    outputted.

    If no <hash> is given, then HEAD of the version control system is used as a starting point.

    Unless ``perun --no-pager log`` is issued as command, or appropriate
    :ckey:`paging` option is set, the outputs of log will be paged (by
    default using ``less``).

    Refer to :ref:`logs-log` for information on how to customize the outputs of
    ``log`` or how to set :ckey:`global.minor_version_info_fmt` in the nearest
    configuration.
    """
    try:
        commands.log(head, **kwargs)
    except (NotPerunRepositoryException,
            UnsupportedModuleException) as exception:
        perun_log.error(str(exception))
Example #12
def status(**kwargs):
    """Shows the status of vcs, associated profiles and perun.

    Shows the status of both the nearest perun and wrapped version control
    system. For vcs this outputs e.g. the current minor version ``HEAD``,
    current major version and description of the ``HEAD``.  Moreover ``status``
    prints the lists of tracked and pending (found in ``.perun/jobs``) profiles
    lexicographically sorted along with additional information such as their
    types and creation times.

    Unless ``perun --no-pager status`` is issued as command, or appropriate
    :ckey:`paging` option is set, the outputs of status will be paged (by
    default using ``less``).

    An error is raised if the command is executed outside the scope of any
    perun repository, or if the configuration misses certain configuration
    keys (namely ``global.profile_info_fmt``).

    Refer to :ref:`logs-status` for information on how to customize the outputs of
    ``status`` or how to set :ckey:`global.profile_info_fmt` in the nearest
    configuration.
    """
    try:
        commands.status(**kwargs)
    except (NotPerunRepositoryException, UnsupportedModuleException,
            MissingConfigSectionException) as exception:
        perun_log.error(str(exception))
Example #13
def load_profile_from_args(profile_name, minor_version):
    """
    :param Profile profile_name: profile that will be stored for the minor version
    :param str minor_version: SHA-1 representation of the minor version
    :returns dict: loaded profile represented as dictionary
    """
    # If the profile is in raw form
    if not store.is_sha1(profile_name):
        _, minor_index_file = store.split_object_name(
            pcs.get_object_directory(), minor_version)
        # If there is nothing at all in the index (since it was not even created yet),
        #   we return nothing; otherwise we look up the entries in the index
        if not os.path.exists(minor_index_file):
            return None
        with open(minor_index_file, 'rb') as minor_handle:
            lookup_pred = lambda entry: entry.path == profile_name
            profiles = store.lookup_all_entries_within_index(
                minor_handle, lookup_pred)
    else:
        profiles = [profile_name]

    # If no profile was found, return nothing; otherwise choose the first one
    if not profiles:
        return None
    chosen_profile = profiles[0]

    # Peek the type to check that the profile is correct, and load the json
    _, profile_name = store.split_object_name(pcs.get_object_directory(),
                                              chosen_profile.checksum)
    profile_type = store.peek_profile_type(profile_name)
    if profile_type == PROFILE_MALFORMED:
        perun_log.error("malformed profile {}".format(profile_name))
    loaded_profile = profile.load_profile_from_file(profile_name, False)

    return loaded_profile
Example #14
def _init(vcs_path, vcs_init_params):
    """
    Arguments:
        vcs_path(path): path where the vcs will be initialized
        vcs_init_params(dict): list of additional params for initialization of the vcs

    Returns:
        bool: true if the vcs was successfully initialized at vcs_path
    """
    dir_was_newly_created = not os.path.exists(vcs_path)
    path_contains_git_repo = contains_git_repo(vcs_path)
    try:
        git.Repo.init(vcs_path, **(vcs_init_params or {}))
    except git.exc.GitCommandError as gce:
        # If calling init created an empty directory at vcs_path, we clean up after ourselves
        if dir_was_newly_created and os.path.exists(vcs_path) and not os.listdir(vcs_path):
            os.rmdir(vcs_path)
        perun_log.error("while git init: {}".format(gce))

    if path_contains_git_repo:
        perun_log.quiet_info(
            "Reinitialized existing Git repository in {}".format(vcs_path))
    else:
        perun_log.quiet_info(
            "Initialized empty Git repository in {}".format(vcs_path))
    return True
Example #15
def load_job_info_from_config(pcs):
    """
    Arguments:
        pcs(PCS): object with performance control system wrapper

    Returns:
        dict: dictionary with cmds, args, workloads, collectors and postprocessors
    """
    local_config = pcs.local_config().data

    if 'collectors' not in local_config.keys():
        log.error("missing 'collectors' section in the local.yml")
    collectors = local_config['collectors']
    postprocessors = local_config.get('postprocessors', [])

    if 'cmds' not in local_config.keys():
        log.error("missing 'cmds' section in local.yml")

    info = {
        'cmd': local_config['cmds'],
        'workload': local_config.get('workloads', ['']),
        'postprocessor': [post.get('name', '') for post in postprocessors],
        'collector': [collect.get('name', '') for collect in collectors],
        'args': local_config.get('args', []),
        'collector_params': {
            collect.get('name', ''): collect.get('params', {})
            for collect in collectors
        },
        'postprocessor_params': {
            post.get('name', ''): post.get('params', {})
            for post in postprocessors
        }
    }

    return info
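
For illustration, the parsed local.yml data consumed above might look roughly as
follows (a hypothetical sketch, not the authoritative schema):

local_config_example = {
    'cmds': ['./mybin'],
    'args': ['--fast'],
    'workloads': ['input.txt'],
    'collectors': [{'name': 'memory', 'params': {'sampling': 0.01}}],
    'postprocessors': [{'name': 'normalizer'}],
}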
Example #16
def bars(profile, filename, view_in_browser, **kwargs):
    """Customizable interpretation of resources using the bar format.

    .. _Bokeh: https://bokeh.pydata.org/en/latest/

    \b
      * **Limitations**: `none`.
      * **Interpretation style**: graphical
      * **Visualization backend**: Bokeh_

    `Bars` graph shows the aggregation (e.g. sum, count, etc.) of resources of
    given types (or keys). Each bar shows ``<func>`` of resources from ``<of>``
    key (e.g. sum of amounts, average of amounts, count of types, etc.) per
    each ``<per>`` key (e.g. per each snapshot, or per each type).  Moreover,
    the graphs can either be (i) stacked, where the different values of
    ``<by>`` key are shown above each other, or (ii) grouped, where the
    different values of ``<by>`` key are shown next to each other. Refer to
    :pkey:`resources` for examples of keys that can be used as ``<of>``,
    ``<key>``, ``<per>`` or ``<by>``.

    Bokeh_ library is the current interpretation backend, which generates HTML
    files, that can be opened directly in the browser. Resulting graphs can be
    further customized by adding custom labels for axes, custom graph title or
    different graph width.

    Example 1. The following will display the sum of amounts of all
    resources of the given profile for each subtype, stacked by uid (e.g. the
    locations in the program)::

        perun show 0@i bars sum --of 'amount' --per 'subtype' --stacked --by 'uid'

    The example output of the bars is as follows::

        \b
                                        <graph_title>
                                `
                                -         .::.                ````````
                                `         :&&:                ` # \\  `
                                -   .::.  ::::        .::.    ` @  }->  <by>
                                `   :##:  :##:        :&&:    ` & /  `
                <func>(<of>)    -   :##:  :##:  .::.  :&&:    ````````
                                `   ::::  :##:  :&&:  ::::
                                -   :@@:  ::::  ::::  :##:
                                `   :@@:  :@@:  :##:  :##:
                                +````||````||````||````||````

                                            <per>

    Refer to :ref:`views-bars` for more thorough description and example of
    `bars` interpretation possibilities.
    """
    try:
        bokeh_helpers.process_profile_to_graphs(bars_factory, profile,
                                                filename, view_in_browser,
                                                **kwargs)
    except AttributeError as attr_error:
        log.error("while creating graph: {}".format(str(attr_error)))
    except InvalidParameterException as ip_error:
        log.error(str(ip_error))
Example #17
def walk_index(index_handle):
    """Iterator through index entries

    Reads the beginning of the file, verifying the version and type of the index. Then it iterates
    through all of the index entries and yields them as an IndexEntry structure for further
    processing.

    Arguments:
        index_handle(file): handle to file containing index

    Returns:
        IndexEntry: Index entry named tuple
    """
    # Get end of file position
    index_handle.seek(0, 2)
    last_position = index_handle.tell()

    # Move to the beginning of the handle
    index_handle.seek(0)
    magic_bytes = index_handle.read(4)
    assert magic_bytes == INDEX_MAGIC_PREFIX

    index_version = read_int_from_handle(index_handle)
    assert index_version == INDEX_VERSION

    number_of_objects = read_int_from_handle(index_handle)
    loaded_objects = 0

    def read_entry():
        """
        Returns:
            IndexEntry: one read index entry
        """
        # Rather nasty hack, but nothing better comes to my mind currently
        if index_handle.tell() + 24 >= last_position:
            return ''

        file_offset = index_handle.tell()
        file_time = timestamps.timestamp_to_str(
            timestamps.read_timestamp_from_file(index_handle))
        file_sha = binascii.hexlify(index_handle.read(20)).decode('utf-8')
        file_path, byte = "", read_char_from_handle(index_handle)
        while byte != '\0':
            file_path += byte
            byte = read_char_from_handle(index_handle)
        return IndexEntry(file_time, file_sha, file_path, file_offset)

    for entry in iter(read_entry, ''):
        loaded_objects += 1
        if loaded_objects > number_of_objects:
            perun_log.error("fatal: malformed index file")
        yield entry

    if loaded_objects != number_of_objects:
        perun_log.error("fatal: malformed index file")
Example #18
def walk_index(index_handle):
    """Iterator through index entries

    Reads the beginning of the file, verifying the version and type of the index. Then it iterates
    through all of the index entries and yields them as an IndexEntry structure for further
    processing.

    :param file index_handle: handle to file containing index
    :returns IndexEntry: Index entry named tuple
    """
    # Get end of file position
    index_handle.seek(0, 2)
    last_position = index_handle.tell()

    # Move to the beginning of the handle
    index_handle.seek(0)
    magic_bytes = index_handle.read(4)
    if magic_bytes != helpers.INDEX_MAGIC_PREFIX:
        raise MalformedIndexFileException("read blob is not an index file")

    index_version = read_int_from_handle(index_handle)
    if index_version != helpers.INDEX_VERSION:
        raise MalformedIndexFileException("read index file is in format of different index version"
                                          " (read index file = {}".format(index_version) +
                                          ", supported = {})".format(helpers.INDEX_VERSION))

    number_of_objects = read_int_from_handle(index_handle)
    loaded_objects = 0

    def read_entry():
        """
        :returns IndexEntry: one read index entry
        """
        # Rather nasty hack, but nothing better comes to my mind currently
        if index_handle.tell() + 24 >= last_position:
            return ''

        file_offset = index_handle.tell()
        file_time = timestamps.timestamp_to_str(timestamps.read_timestamp_from_file(index_handle))
        file_sha = binascii.hexlify(index_handle.read(20)).decode('utf-8')
        file_path, byte = "", read_char_from_handle(index_handle)
        while byte != '\0':
            file_path += byte
            byte = read_char_from_handle(index_handle)
        return IndexEntry(file_time, file_sha, file_path, file_offset)

    for entry in iter(read_entry, ''):
        loaded_objects += 1
        if loaded_objects > number_of_objects:
            perun_log.error("fatal: malformed index file")
        yield entry

    if loaded_objects != number_of_objects:
        perun_log.error("fatal: malformed index file")
Example #19
def unsupported_option_callback(_, param, value):
    """Processes the currently unsupported option or argument.

    :param click.Context _: called context of the parameter
    :param click.Option param: parameter we are processing
    :param Object value: value of the parameter we are trying to set
    """
    if value:
        err_msg = "option '{}'".format(param.human_readable_name)
        err_msg += "is unsupported/not implemented in this version of perun"
        err_msg += "\n\nPlease update your perun or wait patiently for the implementation"
        log.error(err_msg)
Example #20
def add(profile, minor, **kwargs):
    """Links profile to concrete minor version storing its content in the
    ``.perun`` dir and registering the profile in internal minor version index.

    In order to link <profile> to given minor version <hash> the following
    steps are executed:

        1. We check in <profile> that its :preg:`origin` key corresponds to
           <hash>. This serves as a check, that we do not assign profiles to
           different minor versions.

        2. The :preg:`origin` is removed and contents of <profile> are
           compressed using the `zlib` compression method.

        3. Binary header for the profile is constructed.

        4. Compressed contents are appended to header, and this blob is stored
           in ``.perun/objects`` directory.

        5. New blob is registered in <hash> minor version's index.

        6. Unless ``--keep-profile`` is set, the original profile is deleted.

    If no `<hash>` is specified, then current ``HEAD`` of the wrapped version
    control system is used instead. Massaging of <hash> is taken care of by
    underlying version control system (e.g. git uses ``git rev-parse``).

    <profile> can either be a ``pending tag`` or a full path. ``Pending tags``
    are in form of ``i@p``, where ``i`` stands for an index in the pending
    profile directory (i.e. ``.perun/jobs``) and ``@p`` is a literal suffix.
    Run ``perun status`` to see the `tag` annotation of pending profiles.

    Example of adding profiles:

    .. code-block:: bash

        $ perun add mybin-memory-input.txt-2017-03-01-16-11-04.perf

    This command adds the profile collected by `memory` collector during
    profiling ``mybin`` command with ``input.txt`` workload on 1st March at
    16:11 to the current ``HEAD``.

    An error is raised if the command is executed outside the scope of any
    perun repository, if <profile> points to an incorrect profile (i.e. not
    w.r.t. :ref:`profile-spec`) or if <hash> does not point to a valid minor
    version ref.

    See :doc:`internals` for information how perun handles profiles internally.
    """
    try:
        commands.add(profile, minor, **kwargs)
    except (NotPerunRepositoryException,
            IncorrectProfileFormatException) as exception:
        perun_log.error(str(exception))
Example #21
def get_config_dir(self, config_type):
    """Returns the path to the directory with the config of the given type.

    Arguments:
        config_type(str): type of the config ('local'/'recursive' or 'shared'/'global')

    Returns:
        str: path to the directory with the config
    """
    if config_type in ('local', 'recursive'):
        return self.path
    elif config_type in ('shared', 'global'):
        return config.lookup_shared_config_dir()
    else:
        log.error("wrong configuration type for self.get_config_dir: '{}'".format(config_type))
Example #22
def configure_local_perun(perun_path):
    """Configures the local perun repository with the interactive help of the user

    Arguments:
        perun_path(str): destination path of the perun repository
    """
    pcs = PCS(perun_path)
    editor = perun_config.lookup_key_recursively(pcs.path, 'global.editor')
    local_config_file = pcs.get_config_file('local')
    try:
        utils.run_external_command([editor, local_config_file])
    except ValueError as v_exception:
        perun_log.error("could not invoke '{}' editor: {}".format(
            editor, str(v_exception)))
Example #23
def get_config_file(self, config_type):
    """Returns the path of the config file of the given type.

    Arguments:
        config_type(str): type of the config ('local'/'recursive' or 'shared'/'global')

    Returns:
        str: path of the config of the given type
    """
    if config_type in ('local', 'recursive'):
        return os.path.join(self.path, 'local.yml')
    elif config_type in ('shared', 'global'):
        return os.path.join(config.lookup_shared_config_dir(), 'shared.yml')
    else:
        log.error("wrong configuration type for self.get_config_file: '{}'".format(config_type))
Example #24
def create_from_params(profile,
                       func,
                       of_key,
                       per_key,
                       by_key,
                       cummulation_type,
                       x_axis_label,
                       y_axis_label,
                       graph_title,
                       graph_width=800):
    """Creates Bar graph according to the given parameters.

    Takes the input profile and converts it to a pandas.DataFrame. Then the data selected by the
    'of_key' parameter are used as values and are aggregated by the 'func' function depending on
    the values of 'per_key'. Values are further stacked or grouped by the 'by_key' key according
    to the given cummulation type.

    Arguments:
        profile(dict): dictionary with measured data
        func(str): function that will be used for aggregation of the data
        of_key(str): key that specifies which fields of the resource entry will be used as data
        per_key(str): key that specifies fields of the resource that will be on the x axis
        by_key(str): key that specifies grouping or stacking of the resources
        cummulation_type(str): type of the cummulation of the data (either stacked or grouped)
        x_axis_label(str): label on the x axis
        y_axis_label(str): label on the y axis
        graph_title(str): name of the graph
        graph_width(int): width of the created bokeh graph

    Returns:
        charts.Bar: bar graph according to the params
    """
    # Convert profile to pandas data grid
    data_frame = convert.resources_to_pandas_dataframe(profile)

    # Create basic graph:
    if cummulation_type == 'stacked':
        bar_graph = create_stacked_bar_graph(data_frame, func, of_key, per_key,
                                             by_key)
    elif cummulation_type == 'grouped':
        bar_graph = create_grouped_bar_graph(data_frame, func, of_key, per_key,
                                             by_key)
    else:
        log.error("unknown cummulation type '{}'".format(cummulation_type))

    # Call basic configuration of the graph
    bokeh_helpers.configure_graph(bar_graph, profile, func, graph_title,
                                  x_axis_label, y_axis_label, graph_width)

    return bar_graph
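
A calling sketch with hypothetical keys of a memory profile:

bar_graph = create_from_params(
    profile, 'sum', 'amount', 'snapshots', 'uid', 'stacked',
    'snapshot', 'sum of amounts [B]', 'Memory usage per snapshot'
)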
Example #25
def read_config_from(path):
    """Reads the config data from the path

    :param str path: source path of the config
    :returns: configuration data represented as dictionary of keys and their appropriate values
        (possibly nested)
    """
    try:
        return streams.safely_load_yaml_from_file(path)
    except scanner.ScannerError as scanner_error:
        perun_log.error(
            "corrupted configuration file '{}': {}\n".format(
                path, str(scanner_error)) +
            "\nPerhaps you did not escape strings with special characters in quotes?"
        )
Example #26
def _get_minor_version_info(git_repo, minor_version):
    """
    Arguments:
        git_repo(git.Repo): wrapped repository of the perun
        minor_version(str): identification of minor_version

    Returns:
        MinorVersion: namedtuple of minor version (date author email checksum desc parents)
    """
    assert store.is_sha1(minor_version)

    minor_version_commit = git_repo.commit(minor_version)
    if not minor_version_commit:
        perun_log.error(
            "{} does not represent a valid commit object".format(minor_version))
    return _parse_commit(minor_version_commit)
Example #27
def group_and_aggregate(data, group_through_key, func):
    """Groups the data by group_through_key and then aggregates it through function

    :param pandas.DataFrame data: data frame with partially grouped data
    :param str group_through_key: key which will be used for further aggregation
    :param str func: aggregation function for the grouped data
    :returns dict: source data frame
    """
    # Aggregate the data according to the func grouped by through_key
    through_data_group = data.groupby(group_through_key)
    try:
        aggregation_function = getattr(through_data_group, func)
    except AttributeError:
        log.error("{} function is not supported as aggregation for this visualization".format(
            func
        ))
    return aggregation_function()
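
A self-contained sketch on toy data:

import pandas

toy_data = pandas.DataFrame({'uid': ['a', 'a', 'b'], 'amount': [1, 2, 3]})
# sums 'amount' per 'uid': a -> 3, b -> 3
aggregated = group_and_aggregate(toy_data, 'uid', 'sum')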
Example #28
def generate_header_for_profile(job):
    """
    :param Job job: job with information about the computed profile
    :returns dict: dictionary in form of {'header': {}} corresponding to the perun specification
    """
    try:
        collector = get_module('.'.join(['perun.collect', job.collector.name]))
    except ImportError:
        perun_log.error("could not find package for collector {}".format(job.collector.name))

    return {
        'type': collector.COLLECTOR_TYPE,
        'cmd': job.cmd,
        'params': job.args,
        'workload': job.workload,
        'units': generate_units(collector)
    }
Example #29
def build_demangle_cache(names):
    """Builds global cache for demangle() function calls.

    Instead of continuous calls to subprocess, this takes all of the collected names
    and calls the demangler just once, while constructing the cache.

    :param set names: set of names that will be demangled in future
    """
    global demangle_cache

    list_of_names = list(names)
    if not all(map(PATTERN_WORD.match, list_of_names)):
        log.error("incorrect values in demangled names")
    else:
        sys_call = ['c++filt'] + list_of_names
        output = subprocess.check_output(sys_call).decode("utf-8").strip()
        demangle_cache = dict(zip(list_of_names, output.split("\n")))
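
A usage sketch with hypothetical mangled names (assumes c++filt is installed and
that the names pass the PATTERN_WORD check):

build_demangle_cache({'_Z3foov', '_Z3bari'})
# demangle_cache now maps e.g. '_Z3foov' -> 'foo()'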
Example #30
def load_config(config_dir, config_type):
    """Loads the configuration of given type from the appropriate file (either local.yml or
    global.yml).

    :param str config_dir: directory, where the config is stored
    :param str config_type: type of the config (either shared or local)
    :returns: loaded Config object with populated data and set path and type
    """
    config_file = os.path.join(config_dir, ".".join([config_type, 'yml']))

    try:
        if not os.path.exists(config_file):
            init_config_at(config_file, config_type)

        return Config(config_type, config_file, read_config_from(config_file))
    except IOError as io_error:
        perun_log.error("error initializing {} config: {}".format(
            config_type, str(io_error)))
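
A minimal usage sketch tying this to the shared config lookup from Example #5
(assumes both functions are available in the same module):

# Loads (and lazily initializes) the shared configuration:
shared_config = load_config(lookup_shared_config_dir(), 'shared')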