Example #1
class PBenchStatsReader(object):
    MARKER = "\n},"

    def __init__(self, filename, parent_logger):
        super(PBenchStatsReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.buffer = ''
        self.data = {}
        self.last_data = 0

    def read_file(self):
        _bytes = self.file.get_bytes()
        if _bytes:
            self.buffer += _bytes

        while self.MARKER in self.buffer:
            # split off one complete JSON fragment at the marker and parse it
            idx = self.buffer.find(self.MARKER) + len(self.MARKER)
            chunk_str = self.buffer[:idx - 1]
            self.buffer = self.buffer[idx + 1:]
            chunk = json.loads("{%s}" % chunk_str)

            for date_str in chunk.keys():
                statistics = chunk[date_str]

                date_obj = datetime.datetime.strptime(date_str.split(".")[0], '%Y-%m-%d %H:%M:%S')
                date = int(time.mktime(date_obj.timetuple()))
                self.data[date] = 0

                for benchmark_name in statistics.keys():
                    if not benchmark_name.startswith("benchmark_io"):
                        continue
                    benchmark = statistics[benchmark_name]
                    for method in benchmark:
                        meth_obj = benchmark[method]
                        if "mmtasks" in meth_obj:
                            self.data[date] += meth_obj["mmtasks"][2]

                self.log.debug("Active instances stats for %s: %s", date, self.data[date])

    def get_data(self, tstmp):
        if tstmp in self.data:
            self.last_data = self.data[tstmp]
            return self.data[tstmp]
        else:
            self.log.debug("No active instances info for %s", tstmp)
            return self.last_data
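A minimal usage sketch for this reader, assuming the module's imports (json, datetime, time, FileReader) are in place; the stats filename and the one-second polling loop are illustrative, not part of the original module:

import logging
import time

log = logging.getLogger("pbench-demo")
reader = PBenchStatsReader("pbench-stats.log", log)  # hypothetical stats file path

for _ in range(10):
    reader.read_file()                    # consume newly appended bytes, parse complete chunks
    now = int(time.time())
    # get_data() falls back to the last known value when the exact second is missing
    print(now, reader.get_data(now))
    time.sleep(1)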
Example #2
class SlavesReader(ResultsProvider):
    def __init__(self, filename, num_slaves, parent_logger):
        """
        :type filename: str
        :type num_slaves: int
        :type parent_logger: logging.Logger
        """
        super(SlavesReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.join_buffer = {}
        self.num_slaves = num_slaves
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.read_buffer = ""

    def _calculate_datapoints(self, final_pass=False):
        read = self.file.get_bytes(size=1024 * 1024, last_pass=final_pass)
        if not read or not read.strip():
            return
        self.read_buffer += read
        while "\n" in self.read_buffer:
            _line = self.read_buffer[:self.read_buffer.index("\n") + 1]
            self.read_buffer = self.read_buffer[len(_line):]
            self.fill_join_buffer(json.loads(_line))

        max_full_ts = self.get_max_full_ts()

        if max_full_ts is not None:
            for point in self.merge_datapoints(max_full_ts):
                yield point

    def merge_datapoints(self, max_full_ts):
        reader_id = self.file.name + "@" + str(id(self))
        for key in sorted(self.join_buffer.keys(), key=int):
            if int(key) <= max_full_ts:
                sec_data = self.join_buffer.pop(key)
                self.log.debug("Processing complete second: %s", key)
                point = DataPoint(int(key))
                point[DataPoint.SOURCE_ID] = reader_id
                for sid, item in iteritems(sec_data):
                    point.merge_point(self.point_from_locust(key, sid, item))
                point.recalculate()
                yield point

    def get_max_full_ts(self):
        max_full_ts = None
        for key in sorted(self.join_buffer.keys(), key=int):
            # a second is "full" once every slave has reported data for it
            if len(self.join_buffer[key]) >= self.num_slaves:
                max_full_ts = int(key)
        return max_full_ts

    def fill_join_buffer(self, data):
        self.log.debug("Got slave data: %s", data)
        for stats_item in data['stats']:
            for timestamp in stats_item['num_reqs_per_sec'].keys():
                if timestamp not in self.join_buffer:
                    self.join_buffer[timestamp] = {}
                self.join_buffer[timestamp][data['client_id']] = data

    @staticmethod
    def point_from_locust(timestamp, sid, data):
        """
        :type timestamp: str
        :type sid: str
        :type data: dict
        :rtype: DataPoint
        """
        point = DataPoint(int(timestamp))
        point[DataPoint.SOURCE_ID] = sid
        overall = KPISet()
        for item in data['stats']:
            if timestamp not in item['num_reqs_per_sec']:
                continue

            kpiset = KPISet()
            kpiset[KPISet.SAMPLE_COUNT] = item['num_reqs_per_sec'][timestamp]
            kpiset[KPISet.CONCURRENCY] = data['user_count']
            kpiset[KPISet.BYTE_COUNT] = item['total_content_length']
            if item['num_requests']:
                avg_rt = (item['total_response_time'] / 1000.0) / item['num_requests']
                kpiset.sum_rt = item['num_reqs_per_sec'][timestamp] * avg_rt

            for err in data['errors'].values():
                if err['name'] == item['name']:
                    new_err = KPISet.error_item_skel(err['error'], None, err['occurences'], KPISet.ERRTYPE_ERROR,
                                                     Counter(), None)
                    KPISet.inc_list(kpiset[KPISet.ERRORS], ("msg", err['error']), new_err)
                    kpiset[KPISet.FAILURES] += err['occurences']

            kpiset[KPISet.SUCCESSES] = kpiset[KPISet.SAMPLE_COUNT] - kpiset[KPISet.FAILURES]
            point[DataPoint.CURRENT][item['name']] = kpiset
            overall.merge_kpis(kpiset, sid)

        point[DataPoint.CURRENT][''] = overall
        point.recalculate()
        return point
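A sketch of how this reader could be driven on its own, assuming a slaves.ldjson file (the name is illustrative) containing one JSON report per line; in Taurus the surrounding aggregator normally pulls the datapoints, so calling the protected generator directly is only a demonstration shortcut:

import logging

reader = SlavesReader("slaves.ldjson", num_slaves=2,
                      parent_logger=logging.getLogger("locust-demo"))

# Each call consumes the lines appended since the previous call; final_pass=True
# lets the underlying FileReader drain whatever is left in the file.
for point in reader._calculate_datapoints(final_pass=True):
    overall = point[DataPoint.CURRENT]['']
    print(point[DataPoint.SOURCE_ID], overall[KPISet.SAMPLE_COUNT])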
Example #3
class SlavesReader(ResultsProvider):
    def __init__(self, filename, num_slaves, parent_logger):
        """
        :type filename: str
        :type num_slaves: int
        :type parent_logger: logging.Logger
        """
        super(SlavesReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.join_buffer = {}
        self.num_slaves = num_slaves
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.read_buffer = ""

    def _calculate_datapoints(self, final_pass=False):
        self.read_buffer += self.file.get_bytes(size=1024 * 1024,
                                                last_pass=final_pass)
        while "\n" in self.read_buffer:
            _line = self.read_buffer[:self.read_buffer.index("\n") + 1]
            self.read_buffer = self.read_buffer[len(_line):]
            self.fill_join_buffer(json.loads(_line))

        max_full_ts = self.get_max_full_ts()

        if max_full_ts is not None:
            for point in self.merge_datapoints(max_full_ts):
                yield point

    def merge_datapoints(self, max_full_ts):
        for key in sorted(self.join_buffer.keys(), key=int):
            if int(key) <= max_full_ts:
                sec_data = self.join_buffer.pop(key)
                self.log.debug("Processing complete second: %s", key)
                point = DataPoint(int(key))
                for sid, item in iteritems(sec_data):
                    point.merge_point(self.point_from_locust(key, sid, item))
                point.recalculate()
                yield point

    def get_max_full_ts(self):
        max_full_ts = None
        for key in sorted(self.join_buffer.keys(), key=int):
            # a second is "full" once every slave has reported data for it
            if len(self.join_buffer[key]) >= self.num_slaves:
                max_full_ts = int(key)
        return max_full_ts

    def fill_join_buffer(self, data):
        self.log.debug("Got slave data: %s", data)
        for stats_item in data['stats']:
            for timestamp in stats_item['num_reqs_per_sec'].keys():
                if timestamp not in self.join_buffer:
                    self.join_buffer[timestamp] = {}
                self.join_buffer[timestamp][data['client_id']] = data

    @staticmethod
    def point_from_locust(timestamp, sid, data):
        """
        :type timestamp: str
        :type sid: str
        :type data: dict
        :rtype: DataPoint
        """
        point = DataPoint(int(timestamp))
        point[DataPoint.SOURCE_ID] = sid
        overall = KPISet()
        for item in data['stats']:
            if timestamp not in item['num_reqs_per_sec']:
                continue

            kpiset = KPISet()
            kpiset[KPISet.SAMPLE_COUNT] = item['num_reqs_per_sec'][timestamp]
            kpiset[KPISet.CONCURRENCY] = data['user_count']
            kpiset[KPISet.BYTE_COUNT] = item['total_content_length']
            if item['num_requests']:
                avg_rt = (item['total_response_time'] /
                          1000.0) / item['num_requests']
                kpiset.sum_rt = item['num_reqs_per_sec'][timestamp] * avg_rt

            for err in data['errors'].values():
                if err['name'] == item['name']:
                    new_err = KPISet.error_item_skel(err['error'], None,
                                                     err['occurences'],
                                                     KPISet.ERRTYPE_ERROR,
                                                     Counter(), None)
                    KPISet.inc_list(kpiset[KPISet.ERRORS],
                                    ("msg", err['error']), new_err)
                    kpiset[KPISet.FAILURES] += err['occurences']

            point[DataPoint.CURRENT][item['name']] = kpiset
            overall.merge_kpis(kpiset)

        point[DataPoint.CURRENT][''] = overall
        point.recalculate()
        return point
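The dictionary keys below are exactly the ones fill_join_buffer() and point_from_locust() read; the concrete values are made up to show the shape of a single slave report line:

slave_report = {
    "client_id": "slave-01",
    "user_count": 10,
    "stats": [{
        "name": "/api/users",
        "num_reqs_per_sec": {"1530094704": 42},  # samples per second, keyed by timestamp string
        "num_requests": 42,
        "total_response_time": 8400,             # milliseconds, summed over all samples
        "total_content_length": 123456,
    }],
    "errors": {},
}

# point_from_locust() is a staticmethod, so it can be exercised without a reader instance
point = SlavesReader.point_from_locust("1530094704", "slave-01", slave_report)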
Example #4
class Scheduler(object):
    REC_TYPE_SCHEDULE = 0
    REC_TYPE_LOOP_START = 1
    REC_TYPE_STOP = 2

    def __init__(self, load, payload_filename, parent_logger):
        super(Scheduler, self).__init__()
        self.need_start_loop = None
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.load = load
        self.payload_file = FileReader(filename=payload_filename, parent_logger=self.log)
        if not load.duration and not load.iterations:
            self.iteration_limit = 1
        else:
            self.iteration_limit = load.iterations

        self.concurrency = load.concurrency if load.concurrency is not None else 1

        self.step_len = load.ramp_up / load.steps if load.steps and load.ramp_up else 0
        if load.throughput:
            self.ramp_up_slope = load.throughput / load.ramp_up if load.ramp_up else 0
            self.step_size = float(load.throughput) / load.steps if load.steps else 0
        else:
            self.ramp_up_slope = None
            self.step_size = float(self.concurrency) / load.steps if load.steps else 0

        self.count = 0.0
        self.time_offset = 0.0
        self.iterations = 0

    def _payload_reader(self):
        self.iterations = 1
        rec_type = self.REC_TYPE_SCHEDULE
        while True:
            payload_offset = self.payload_file.offset
            line = self.payload_file.get_line()
            if not line:  # rewind
                self.payload_file.offset = 0
                self.iterations += 1

                if self.need_start_loop and not self.iteration_limit:
                    self.need_start_loop = False
                    self.iteration_limit = self.iterations
                    rec_type = self.REC_TYPE_LOOP_START

                if self.iteration_limit and self.iterations > self.iteration_limit:
                    self.log.debug("Schedule iterations limit reached: %s", self.iteration_limit)
                    break

            if not line.strip():  # we're fine to skip empty lines between records
                continue

            parts = line.split(' ')
            if len(parts) < 2:
                raise TaurusInternalException("Wrong format for meta-info line: %s" % line)

            payload_len, marker = parts
            payload_len = int(payload_len)
            payload = self.payload_file.get_bytes(payload_len)
            yield payload_len, payload_offset, payload, marker.strip(), len(line), rec_type
            rec_type = self.REC_TYPE_SCHEDULE

    def generate(self):
        for payload_len, payload_offset, payload, marker, meta_len, record_type in self._payload_reader():
            if self.load.throughput:
                self.time_offset += self.__get_time_offset_rps()
                if self.load.duration and self.time_offset > self.load.duration:
                    self.log.debug("Duration limit reached: %s", self.time_offset)
                    break
            else:  # concurrency schedule
                self.time_offset = self.__get_time_offset_concurrency()

            overall_len = payload_len + meta_len
            yield self.time_offset, payload_len, payload_offset, payload, marker, record_type, overall_len
            self.count += 1

    def __get_time_offset_concurrency(self):
        if not self.load.ramp_up or self.count >= self.concurrency:
            if self.need_start_loop is None:
                self.need_start_loop = True
            return -1  # special case, means no delay
        elif self.load.steps:
            step = math.floor(self.count / self.step_size)
            return step * self.step_len
        else:  # ramp-up case
            return self.count * self.load.ramp_up / self.concurrency

    def __get_time_offset_rps(self):
        if not self.load.ramp_up or self.time_offset > self.load.ramp_up:
            # limit iterations
            rps = self.load.throughput
            if self.need_start_loop is None:
                self.need_start_loop = True
        elif self.load.steps:
            rps = self.step_size * (math.floor(self.time_offset / self.step_len) + 1)
        else:  # ramp-up case
            xpos = math.sqrt(2 * self.count / self.ramp_up_slope)
            rps = xpos * self.ramp_up_slope

        return 1.0 / rps if rps else 0
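A rough driver for the scheduler, assuming the load object only needs the attributes the class actually reads (duration, iterations, concurrency, ramp_up, steps, throughput); the SimpleNamespace stand-in and the demo payload file are illustrative, with the file following the "<payload-length> <marker>" meta-line layout that _payload_reader() parses:

import logging
from types import SimpleNamespace

payload = b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"
with open("demo.payload", "wb") as fhd:
    fhd.write(b"%d request\n" % len(payload))  # meta line: "<payload-length> <marker>"
    fhd.write(payload)

load = SimpleNamespace(duration=10, iterations=None, concurrency=5,
                       ramp_up=5, steps=None, throughput=None)
scheduler = Scheduler(load, "demo.payload", logging.getLogger("scheduler-demo"))

# With a concurrency schedule the generator stops on its own once the loop start
# has been detected and one extra full pass over the payload file is done.
for time_offset, payload_len, offset, data, marker, rec_type, overall_len in scheduler.generate():
    print(time_offset, marker, payload_len)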