Example #1
def extract_metric(metric, line, output, prefix='', lower_is_better=True):
    try:
        name, value_part = [part.strip() for part in line.split(':')]
        if name != metric:
            message = 'Name mismatch: expected "{}", got "{}"'
            raise WorkloadError(message.format(metric, name.strip()))
        if not value_part or not value_part[0].isdigit():
            raise ValueError(
                'value part does not start with a digit: {}'.format(
                    value_part))
        idx = -1
        if not value_part[idx].isdigit():  # units detected at the end of the line
            while not value_part[idx - 1].isdigit():
                idx -= 1
            value = numeric(value_part[:idx])
            units = value_part[idx:]
        else:
            value = numeric(value_part)
            units = None
        output.add_metric(prefix + metric,
                          value,
                          units,
                          lower_is_better=lower_is_better)
    except Exception as e:
        message = 'Could not extract sysbench metric "{}"; got "{}"'
        raise WorkloadError(message.format(prefix + metric, e))
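All of these examples lean on numeric(), the WA/devlib helper that converts a value to an int or float and raises ValueError when it cannot. As a usage illustration for extract_metric, the sketch below parses a typical sysbench summary line; the sample line and the output object (assumed to expose add_metric()) are stand-ins for this sketch, not taken from the source.

# Hypothetical usage sketch.
line = 'total time:                          10.2315s'
extract_metric('total time', line, output, prefix='sysbench_')
# With a trailing unit on the value, this is expected to record roughly:
#   output.add_metric('sysbench_total time', 10.2315, 's', lower_is_better=True)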
Example #2
    def extract_metrics_from_logcat(self, context):
        metric_names = ['mean', 'junk_p', 'std_dev', 'count_bad', 'count_junk']
        logcat_file = context.get_artifact_path('logcat')
        with open(logcat_file) as fh:
            run_tests = copy(self.test_ids or self.valid_test_ids)
            current_iter = None
            current_test = None
            for line in fh:

                match = self.iteration_regex.search(line)
                if match:
                    if current_iter is not None:
                        msg = 'Did not see results for iteration {} of {}'
                        self.logger.warning(msg.format(current_iter, current_test))
                    current_iter = int(match.group('iteration'))
                    if current_iter == 0:
                        try:
                            current_test = run_tests.pop(0)
                        except IndexError:
                            self.logger.warning('Encountered an iteration for an unknown test.')
                            current_test = 'unknown'
                    continue

                match = self.metrics_regex.search(line)
                if match:
                    if current_iter is None:
                        self.logger.warning('Encountered unexpected metrics (no iteration)')
                        continue

                    for name in metric_names:
                        value = numeric(match.group(name))
                        context.add_metric(name, value, units=None, lower_is_better=True,
                                           classifiers={'test_id': current_test, 'rep': current_iter})

                    current_iter = None
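The method above depends on two pre-compiled patterns, self.iteration_regex and self.metrics_regex, whose named groups must cover 'iteration' and the five metric names. The stand-ins below only illustrate that contract; the workload's real expressions may well differ.

import re

# Hypothetical stand-ins for self.iteration_regex / self.metrics_regex,
# showing the named groups the parsing loop relies on.
iteration_regex = re.compile(r'iteration:\s*(?P<iteration>\d+)')
metrics_regex = re.compile(
    r'Mean:\s*(?P<mean>[0-9.]+)\s+JankP:\s*(?P<junk_p>[0-9.]+)\s+'
    r'StdDev:\s*(?P<std_dev>[0-9.]+)\s+Count Bad:\s*(?P<count_bad>\d+)\s+'
    r'Count Jank:\s*(?P<count_junk>\d+)'
)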
Example #3
    def __init__(self, name, value, units=None, lower_is_better=False,
                 classifiers=None):
        self.name = name
        self.value = numeric(value)
        self.units = units
        self.lower_is_better = lower_is_better
        self.classifiers = classifiers or {}
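The enclosing class is not shown in this excerpt; assuming it is WA's Metric, construction looks roughly like this, with numeric() coercing the string value to a number.

# Usage sketch; the class name is an assumption.
m = Metric('execution_time', '1.23', units='seconds', lower_is_better=True,
           classifiers={'iteration': 1})
# m.value is now the number 1.23 rather than the string '1.23'.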
Example #4
def try_convert_to_numeric(v):
    try:
        if isiterable(v):
            return list(map(numeric, v))
        else:
            return numeric(v)
    except ValueError:
        return v
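A few hedged examples of the intended behaviour, assuming isiterable() treats lists but not strings as iterable and numeric() returns an int for integral input:

try_convert_to_numeric('42')          # -> 42
try_convert_to_numeric(['1', '2.5'])  # -> [1, 2.5]
try_convert_to_numeric('n/a')         # -> 'n/a'  (the ValueError is swallowed)
# If any element of an iterable fails to convert, the ValueError from map()
# is caught and the original iterable is returned unchanged.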
Example #5
File: perf.py  Project: qais-yousef/lisa
    def _add_report_metric(column_headers, column_header_indeces, line, words,
                           context, event_type, label):
        if '%' not in words[0]:
            return
        classifiers = {}
        for i in range(1, len(column_headers)):
            classifiers[column_headers[i]] = line[
                column_header_indeces[i]:column_header_indeces[i + 1]].strip()

        context.add_metric('{}_{}_Overhead'.format(label, event_type),
                           numeric(words[0].strip('%')),
                           'percent',
                           classifiers=classifiers)
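To make the slicing concrete, here is a hypothetical call with a made-up perf report row. The offsets mark where each column starts, plus an end-of-line sentinel so the i + 1 lookup never overflows; context is assumed to expose add_metric(), and the method is shown as a bare call only for brevity.

# Illustrative data only; not taken from perf.py.
line = '    12.34%  my_app   libc.so   [.] memcpy'
words = line.split()
column_headers = ['Overhead', 'Command', 'Shared Object', 'Symbol']
column_header_indeces = [0, line.index('my_app'), line.index('libc.so'),
                         line.index('[.]'), len(line)]
_add_report_metric(column_headers, column_header_indeces, line, words,
                   context, 'cycles', 'perf')
# Expected to add 'perf_cycles_Overhead' = 12.34 ('percent') with classifiers
# {'Command': 'my_app', 'Shared Object': 'libc.so', 'Symbol': '[.] memcpy'}.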
Example #6
def extract_metric(metric, line, output, prefix='', lower_is_better=True):
    try:
        name, value_part = [part.strip() for part in line.split(':')]
        if name != metric:
            message = 'Name mismatch: expected "{}", got "{}"'
            raise WorkloadError(message.format(metric, name.strip()))
        if not value_part or not value_part[0].isdigit():
            raise ValueError('value part does not start with a digit: {}'.format(value_part))
        idx = -1
        if not value_part[idx].isdigit():  # units detected at the end of the line
            while not value_part[idx - 1].isdigit():
                idx -= 1
            value = numeric(value_part[:idx])
            units = value_part[idx:]
        else:
            value = numeric(value_part)
            units = None
        output.add_metric(prefix + metric,
                          value, units, lower_is_better=lower_is_better)
    except Exception as e:
        message = 'Could not extract sysbench metric "{}"; got "{}"'
        raise WorkloadError(message.format(prefix + metric, e))
Example #7
def extract_threads_fairness_metric(metric, line, output):
    try:
        name_part, value_part = [part.strip() for part in line.split(':')]
        name = name_part.split('(')[0].strip()
        if name != metric:
            message = 'Name mismatch: expected "{}", got "{}"'
            raise WorkloadError(message.format(metric, name))
        avg, stddev = [numeric(v) for v in value_part.split('/')]
        output.add_metric('thread fairness {} avg'.format(metric), avg)
        output.add_metric('thread fairness {} stddev'.format(metric),
                          stddev, lower_is_better=True)
    except Exception as e:
        message = 'Could not extract sysbench metric "{}"; got "{}"'
        raise WorkloadError(message.format(metric, e))
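This one targets the 'Threads fairness' block of sysbench's summary, where each line reports an average and a standard deviation separated by a slash. The sample line below is typical output, and `output` is, as before, assumed to provide add_metric().

# Hypothetical usage sketch.
line = '    events (avg/stddev):           1234.0000/5.67'
extract_threads_fairness_metric('events', line, output)
# Expected to add 'thread fairness events avg' (~1234) and
# 'thread fairness events stddev' (5.67, lower_is_better=True).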
Example #8
    def __init__(self, thread, cpu_id, ts, name, body, parser=None):
        """
        parameters:

        :thread: thread which generated the event
        :cpu_id: cpu on which the event occurred
        :ts: timestamp of the event
        :name: the name of the event
        :body: a string with the rest of the event text
        :parser: optionally, a callable that will parse the body text to populate
                 this event's attributes

        The parser can be any callable that can be invoked with

            parser(event, text)

        where ``event`` is this TraceCmdEvent instance, and ``text`` is the body text to be
        parsed. The parser should update the passed event instance in place and not return
        anything (the return value will be ignored). Any exception raised by the parser will
        be silently ignored (note that this means the event's attributes may be only partially
        initialized).

        """
        self.thread = thread
        self.reporting_cpu_id = int(cpu_id)
        self.timestamp = numeric(ts)
        self.name = name
        self.text = body
        self.fields = {}

        if parser:
            try:
                parser(self, self.text)
            except Exception:  # pylint: disable=broad-except
                # Unknown format; assume the user does not care about, or does
                # not know how to parse, self.text.
                pass
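Below is a sketch of a parser callable that satisfies the contract described in the docstring; the 'key=value' body format, the helper name, and the `thread` object are assumptions for illustration, not the project's actual body parser.

import re

def keyvalue_body_parser(event, text):
    # Populate event.fields from "key=value" pairs in the body text,
    # converting values to numbers where possible.
    for key, value in re.findall(r'(\S+)=(\S+)', text):
        try:
            event.fields[key] = numeric(value)
        except ValueError:
            event.fields[key] = value

event = TraceCmdEvent(thread, '0', '12345.6789', 'sched_wakeup',
                      'comm=foo pid=123 prio=120', parser=keyvalue_body_parser)
# event.timestamp == 12345.6789
# event.fields == {'comm': 'foo', 'pid': 123, 'prio': 120}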
Example #9
def numeric_best_effort(value):
    try:
        return numeric(value)
    except ValueError:
        return value
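Usage follows the same best-effort pattern as try_convert_to_numeric above, but for a single value:

numeric_best_effort('3.14')     # -> 3.14
numeric_best_effort('128')      # -> 128 (assuming numeric() prefers int for integral input)
numeric_best_effort('enabled')  # -> 'enabled', returned unchanged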