Code example #1
    def run(self):
        """Tail the stress-tool log file and publish parsed metrics.

        Waits for the log file to appear (polling every 0.5 s), then
        streams it line by line until the thread is signalled to stop,
        extracting latency, ops and error counters from each data line.
        """
        latency_metrics = (
            'lat_mean', 'lat_med', 'lat_perc_95', 'lat_perc_99',
            'lat_perc_999', 'lat_max',
        )
        while not self.stopped():
            # The stress tool may not have created its log yet — wait for it.
            if not os.path.isfile(self.stress_log_filename):
                time.sleep(0.5)
                continue

            for raw_line in self.follow_file(self.stress_log_filename):
                if self.stopped():
                    break

                if self.skip_line(line=raw_line):
                    continue

                columns = self.split_line(line=raw_line)

                # Latency values are reported in tool units; normalize to ms.
                for metric_name in latency_metrics:
                    if metric_value := self.get_metric_value(
                            columns=columns, metric_name=metric_name):
                        self.set_metric(metric_name,
                                        convert_metric_to_ms(metric_value))

                if ops_value := self.get_metric_value(columns=columns,
                                                      metric_name='ops'):
                    self.set_metric('ops', float(ops_value))

                if error_count := columns[self.metrics_positions.errors]:
                    self.set_metric('errors', int(error_count))
Code example #2
 def test_scylla_bench_metrics_conversion(self):  # pylint: disable=no-self-use
     """Check convert_metric_to_ms on s/ms/µs values and composite durations."""
     expected_by_input = {
         "4ms": 4.0,
         "950µs": 0.95,
         "30ms": 30.0,
         "8.592961906s": 8592.961905999999,
         "18.120703ms": 18.120703,
         "5.963775µs": 0.005963775,
         "9h0m0.024080491s": 32400024.080491,
         "1m0.024080491s": 60024.080491,
         "546431": 546431.0,
     }
     for raw_value, expected_ms in expected_by_input.items():
         result = convert_metric_to_ms(raw_value)
         assert result == expected_ms, f"Expected {expected_ms}, got {result}"
Code example #3
    def _parse_bench_summary(cls, lines):
        """
        Parsing bench results, only parse the summary results.
        Collect results of all nodes and return a dictionaries' list,
        the new structure data will be easy to parse, compare, display or save.

        NOTE(review): no ``return`` statement is visible in this excerpt,
        so as shown the populated ``results`` dict is discarded — confirm
        the original ends with ``return results``.
        """
        # Pre-seed the result keys so downstream consumers always find them,
        # even when the bench output omits a value.
        results = {
            'keyspace_idx': None,
            'stdev gc time(ms)': None,
            'Total errors': None,
            'total gc count': None,
            'loader_idx': None,
            'total gc time (s)': None,
            'total gc mb': 0,
            'cpu_idx': None,
            'avg gc time(ms)': None,
            'latency mean': None
        }

        for line in lines:
            # NOTE(review): str.strip() returns a new string; this call is a
            # no-op as written — likely intended ``line = line.strip()``.
            line.strip()
            # Parse load params
            # pylint: disable=too-many-boolean-expressions
            if line.startswith('Results'):
                # 'Results' is a section header, not a key:value pair.
                continue
            if 'c-o fixed latency' in line:
                # Ignore C-O Fixed latencies
                #
                # c-o fixed latency :
                #   max:        5.668863ms
                #   99.9th:	    5.537791ms
                #   99th:       3.440639ms
                #   95th:       3.342335ms
                break

            # Split into 'key : value' once; lines without ':' carry no data.
            split = line.split(':', maxsplit=1)
            if len(split) < 2:
                continue
            key = split[0].strip()
            # Collapse internal runs of whitespace in the value.
            value = ' '.join(split[1].split())
            if target_key := cls._SB_STATS_MAPPING.get(key):
                if value.isdecimal():
                    # Plain integer counters (e.g. op counts) stay as int.
                    value = int(value)
                else:
                    # Duration strings (s/ms/µs) are normalized to milliseconds.
                    value = convert_metric_to_ms(value)
                results[target_key] = value
            else:
                LOGGER.debug('unknown result key found: `%s` with value `%s`',
                             key, value)
Code example #4
    def _parse_bench_summary(lines):
        """
        Parse scylla-bench output, keeping load parameters and the summary.

        Collects the load-parameter lines (Mode, Workload, ...) and, once the
        'Results' header is seen, the summary statistics. Latency/duration
        values are normalized to milliseconds, and scylla-bench key names are
        renamed to their cassandra-stress equivalents so downstream reporting
        can treat both tools uniformly.

        :param lines: iterable of output lines from scylla-bench.
        :return: dict mapping result keys to parsed values.
        """
        # Load-parameter lines look like 'Mode: write'; keep the first token
        # of the value. ('Partition count' intentionally has no trailing ':'.)
        load_param_prefixes = (
            'Mode:', 'Workload:', 'Timeout:', 'Consistency level:',
            'Partition count', 'Clustering rows:', 'Rows per request:',
            'Page size:', 'Concurrency:', 'Connections:', 'Maximum rate:',
            'Client compression:')
        # scylla-bench key -> cassandra-stress-style key.
        key_aliases = {
            'max': 'latency max',
            '99.9th': 'latency 99.9th percentile',
            '99th': 'latency 99th percentile',
            '95th': 'latency 95th percentile',
            'median': 'latency median',
            'mean': 'latency mean',
            'Operations/s': 'op rate',
            'Rows/s': 'row rate',
            'Total ops': 'Total partitions',  # ==Total rows?
            'Time (avg)': 'Total operation time',
        }
        # Pre-seed the result keys so downstream consumers always find them.
        results = {
            'keyspace_idx': None,
            'stdev gc time(ms)': None,
            'Total errors': None,
            'total gc count': None,
            'loader_idx': None,
            'total gc time (s)': None,
            'total gc mb': 0,
            'cpu_idx': None,
            'avg gc time(ms)': None,
            'latency mean': None
        }
        enable_parse = False

        for line in lines:
            # (Fixed: the original called ``line.strip()`` and discarded the
            # result — a no-op statement, removed.)
            if line.startswith(load_param_prefixes):
                split_idx = line.index(':')
                key = line[:split_idx].strip()
                value = line[split_idx + 1:].split()[0]
                results[key] = value
            elif line.startswith('Clustering row size:'):
                # Row size may be a multi-token distribution; keep all tokens,
                # whitespace-normalized.
                split_idx = line.index(':')
                key = line[:split_idx].strip()
                value = ' '.join(line[split_idx + 1:].split())
                results[key] = value

            if line.startswith('Results'):
                # Summary statistics start after the 'Results' header.
                enable_parse = True
                continue
            if line.startswith('Latency:') or ':' not in line or 'EOF' in line:
                continue
            if not enable_parse:
                continue

            split_idx = line.index(':')
            key = line[:split_idx].strip()
            value = line[split_idx + 1:].split()[0]

            # the value may be in milliseconds(ms) or microseconds(string containing non-ascii character)
            value = convert_metric_to_ms(value)

            # 'Rows/s' doubles as the partition rate.
            if key == 'Rows/s':
                results['partition rate'] = value
            results[key_aliases.get(key, key)] = value
        return results