Example #1
    def _set_env(self):
        props = BetterDict()
        props.merge(self.settings.get('properties'))
        props.merge(self.get_scenario().get("properties"))

        props['gatling.core.outputDirectoryBaseName'] = self.dir_prefix
        props['gatling.core.directory.resources'] = self.engine.artifacts_dir
        props['gatling.core.directory.results'] = self.engine.artifacts_dir

        props.merge(self._get_simulation_props())
        props.merge(self._get_load_props())
        props.merge(self._get_scenario_props())
        for key in sorted(props.keys()):
            prop = props[key]
            val_tpl = "%s"

            if isinstance(prop, string_types):
                if not is_windows():  # use repr() on Linux/macOS so separators/quotes/etc. survive
                    val_tpl = "%r"
                if PY2:
                    prop = prop.encode("utf-8", 'ignore')  # convert from unicode to str

            self.env.add_java_param({"JAVA_OPTS": ("-D%s=" + val_tpl) % (key, prop)})

        self.env.set({"NO_PAUSE": "TRUE"})
        self.env.add_java_param(
            {"JAVA_OPTS": self.settings.get("java-opts", None)})

        self.log.debug('JAVA_OPTS: "%s"', self.env.get("JAVA_OPTS"))
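The loop above turns every property into a `-D` JVM option appended to JAVA_OPTS, quoting string values via `%r` on POSIX so they survive shell word-splitting. A minimal standalone sketch of that conversion; the `format_prop` helper and the sample property values are illustrative, not part of bzt:

    import platform

    def format_prop(key, value):
        """Render one system property as a -D JVM option.

        On POSIX, strings go through repr() so embedded spaces and quotes
        survive shell word-splitting when JAVA_OPTS is expanded.
        """
        tpl = "%s"
        if isinstance(value, str) and platform.system() != "Windows":
            tpl = "%r"
        return ("-D%s=" + tpl) % (key, value)

    props = {
        "gatling.core.outputDirectoryBaseName": "gatling-bzt",
        "gatling.core.runDescription": "smoke test",  # value contains a space
    }
    print(" ".join(format_prop(key, props[key]) for key in sorted(props)))
    # -Dgatling.core.outputDirectoryBaseName='gatling-bzt' -Dgatling.core.runDescription='smoke test'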
Example #2
    def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
        if cwd is None:
            cwd = self.engine.default_cwd

        environ = BetterDict()
        environ.merge(dict(os.environ))

        if env is not None:
            if is_windows():
                # Windows environment variable names are case-insensitive, so normalize before merging
                cur_env = {name.upper(): environ[name] for name in environ}
                old_keys = set(env.keys())
                env = {name.upper(): env[name] for name in env}
                new_keys = set(env.keys())
                if old_keys != new_keys:
                    msg = 'Some Taurus environment variables might have been lost: %s'
                    self.log.debug(msg, list(old_keys - new_keys))
                environ = BetterDict()
                environ.merge(cur_env)
            environ.merge(env)

        environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})

        environ = {key: environ[key] for key in environ.keys() if environ[key] is not None}

        self.log.debug("Executing shell from %s: %s", cwd, args)
        return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
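Because Windows resolves environment variable names case-insensitively, the branch above upper-cases both sides before merging and logs any names that collapse into one. A standalone sketch of that normalization; the `merge_env_windows` helper is illustrative, not bzt's API:

    def merge_env_windows(current, extra):
        """Merge `extra` into `current` the way Windows resolves names:
        case-insensitively, so compare and store everything upper-cased."""
        merged = {name.upper(): value for name, value in current.items()}
        normalized = {name.upper(): value for name, value in extra.items()}
        collapsed = set(extra) - set(normalized)  # names rewritten by upper-casing
        merged.update(normalized)
        return merged, collapsed

    env, collapsed = merge_env_windows({"Path": "C:\\Windows"}, {"path": "C:\\tools"})
    print(env)        # {'PATH': 'C:\\tools'}
    print(collapsed)  # {'path'} -- the value survived, but under the name 'PATH'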
Example #3
    def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
        if cwd is None:
            cwd = self.engine.default_cwd
        aliases = self.get_hostaliases()
        hosts_file = None
        if aliases:
            hosts_file = self.engine.create_artifact("hostaliases", "")
            with open(hosts_file, 'w') as fds:
                for key, value in iteritems(aliases):
                    fds.write("%s %s\n" % (key, value))

        environ = BetterDict()
        environ.merge(dict(os.environ))

        if aliases:
            environ["HOSTALIASES"] = hosts_file
        if env is not None:
            if is_windows():
                # Windows environment variable names are case-insensitive, so normalize before merging
                cur_env = {name.upper(): environ[name] for name in environ}
                old_keys = set(env.keys())
                env = {name.upper(): env[name] for name in env}
                new_keys = set(env.keys())
                if old_keys != new_keys:
                    msg = 'Some Taurus environment variables have been lost: %s'
                    self.log.warning(msg, list(old_keys - new_keys))
                environ = BetterDict()
                environ.merge(cur_env)
            environ.merge(env)

        environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})

        environ = {key: environ[key] for key in environ.keys() if environ[key] is not None}

        return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
Example #4
    def _set_env(self):
        props = BetterDict()
        props.merge(self.settings.get('properties'))
        props.merge(self.get_scenario().get("properties"))

        props['gatling.core.outputDirectoryBaseName'] = self.dir_prefix
        props['gatling.core.directory.resources'] = self.engine.artifacts_dir
        props['gatling.core.directory.results'] = self.engine.artifacts_dir

        props.merge(self._get_simulation_props())
        props.merge(self._get_load_props())
        props.merge(self._get_scenario_props())
        for key in sorted(props.keys()):
            prop = props[key]
            val_tpl = "%s"
            if isinstance(prop, string_types):
                val_tpl = "%r"

            self.env.add_java_param(
                {"JAVA_OPTS": ("-D%s=" + val_tpl) % (key, prop)})

        self.env.set({"NO_PAUSE": "TRUE"})
        self.env.add_java_param(
            {"JAVA_OPTS": self.settings.get("java-opts", None)})

        self.log.debug('JAVA_OPTS: "%s"', self.env.get("JAVA_OPTS"))
Example #5
    def get_kpi_body(self, data_buffer, is_final):
        # - reporting format:
        #   {labels: <data>,    # see below
        #    sourceID: <id of BlazeMeterClient object>,
        #    [final: True]}     # only present in the last report
        #
        # - elements of 'data' are described in __get_label()
        #
        # - elements of 'intervals' are described in __get_interval()
        #   every interval contains info about response codes that were received on it.
        report_items = BetterDict()
        if data_buffer:
            self.owner.first_ts = min(self.owner.first_ts,
                                      data_buffer[0][DataPoint.TIMESTAMP])
            self.owner.last_ts = max(self.owner.last_ts,
                                     data_buffer[-1][DataPoint.TIMESTAMP])

            # following data is received in the cumulative way
            for label, kpi_set in iteritems(
                    data_buffer[-1][DataPoint.CUMULATIVE]):
                if self.owner.extend_report:
                    report_item = {}
                    for state in kpi_set:
                        report_item[state] = self.__get_label(
                            label, kpi_set[state])
                        self.__add_errors(report_item[state], kpi_set[state])
                else:
                    report_item = self.__get_label(label, kpi_set)
                    self.__add_errors(report_item, kpi_set)  # 'Errors' tab
                report_items[label] = report_item

            # fill 'Timeline Report' tab with intervals data
            # intervals are received in the additive way
            if report_items:
                for dpoint in data_buffer:
                    time_stamp = dpoint[DataPoint.TIMESTAMP]
                    for label, kpi_set in iteritems(dpoint[DataPoint.CURRENT]):
                        # BetterDict.get() raises a default that is an exception
                        # instance, so a missing label fails loudly here
                        exc = TaurusInternalException(
                            'Cumulative KPISet is non-consistent')
                        report_item = report_items.get(label, exc)

                        if self.owner.extend_report:
                            for state in report_item:
                                if state in kpi_set:
                                    report_item[state]['intervals'].append(
                                        self.__get_interval(
                                            kpi_set[state], time_stamp))
                        else:
                            report_item['intervals'].append(
                                self.__get_interval(kpi_set, time_stamp))

        report_items = [
            report_items[key] for key in sorted(report_items.keys())
        ]  # convert dict to list
        data = {"labels": report_items, "sourceID": id(self.owner)}
        if is_final:
            data['final'] = True

        return to_json(data)
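The `report_items.get(label, exc)` call above only makes sense if BetterDict treats an exception passed as the `get()` default specially, raising it when the key is missing instead of returning it. A minimal sketch of that contract on a plain dict subclass, not bzt's actual BetterDict:

    class RaisingDict(dict):
        """dict whose get() raises a default that is an exception instance."""

        def get(self, key, default=None):
            if key not in self and isinstance(default, BaseException):
                raise default
            return super(RaisingDict, self).get(key, default)

    items = RaisingDict({"home": {"intervals": []}})
    print(items.get("home", KeyError("non-consistent")))  # {'intervals': []}
    items.get("search", KeyError("non-consistent"))       # raises KeyError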
Example #6
    def _extract_rest_request(self, test_step):
        label = test_step.get('name')
        config = test_step.find('./con:config', namespaces=self.NAMESPACES)
        method = config.get('method')

        method_name = config.get('methodName')
        method_obj = self.interface.find('.//con:method[@name="%s"]' % method_name, namespaces=self.NAMESPACES)
        params = BetterDict()
        if method_obj is not None:
            parent = method_obj.getparent()
            while parent.tag.endswith('resource'):
                for param in parent.findall('./con:parameters/con:parameter', namespaces=self.NAMESPACES):
                    param_name = param.findtext('./con:name', namespaces=self.NAMESPACES)
                    param_value = param.findtext('./con:value', namespaces=self.NAMESPACES)
                    def_value = param.findtext('./con:default', namespaces=self.NAMESPACES)
                    if param_value:
                        params[param_name] = param_value
                    elif def_value:
                        params[param_name] = def_value

                parent = parent.getparent()

        url = self._calc_base_address(test_step) + config.get('resourcePath')
        headers = self._extract_headers(config)
        assertions = self._extract_assertions(config)

        params.merge({
            entry.get("key"): entry.get("value")
            for entry in config.findall('./con:restRequest/con:parameters/con:entry', namespaces=self.NAMESPACES)
        })

        for param_name in list(params.keys()):  # snapshot keys, since we pop while iterating
            template = "{" + param_name + "}"
            if template in url:
                param_value = params.pop(param_name)
                url = url.replace(template, param_value)

        request = {"url": url, "label": label}

        if method is not None and method != "GET":
            request["method"] = method

        if headers:
            request["headers"] = headers

        if assertions:
            request["assert"] = assertions

        body = {}
        for key, value in iteritems(params):
            body[key] = value

        if body:
            request["body"] = body

        return request
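The final parameter loop above moves every parameter whose `{name}` placeholder occurs in the URL out of the future request body and into the path. A standalone sketch of that substitution, with illustrative URL and parameter values:

    def fill_path_params(url, params):
        """Replace {name} placeholders in the URL, consuming matching params."""
        remaining = dict(params)
        for name in list(remaining):
            placeholder = "{" + name + "}"
            if placeholder in url:
                url = url.replace(placeholder, remaining.pop(name))
        return url, remaining

    url, body = fill_path_params("http://api.example.com/users/{id}/posts", {"id": "42", "limit": "10"})
    print(url)   # http://api.example.com/users/42/posts
    print(body)  # {'limit': '10'} -- stays in the request body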
Example #7
    def _set_env(self):
        props = BetterDict()
        props.merge(self.settings.get('properties'))
        props.merge(self.get_scenario().get("properties"))

        props['gatling.core.outputDirectoryBaseName'] = self.dir_prefix
        props['gatling.core.directory.resources'] = self.engine.artifacts_dir
        props['gatling.core.directory.results'] = self.engine.artifacts_dir

        props.merge(self._get_simulation_props())
        props.merge(self._get_load_props())
        props.merge(self._get_scenario_props())
        for key in sorted(props.keys()):
            prop = props[key]
            val_tpl = "%s"

            if isinstance(prop, string_types):
                if not is_windows():  # use repr() on Linux/macOS so separators/quotes/etc. survive
                    val_tpl = "%r"
                if PY2:
                    prop = prop.encode("utf-8", 'ignore')  # convert from unicode to str

            if is_gatling2(self.tool.version) or not key.startswith('gatling.'):  # send param through JAVA_OPTS
                self.env.add_java_param({"JAVA_OPTS": ("-D%s=" + val_tpl) % (key, prop)})

        self.env.set({"NO_PAUSE": "TRUE"})
        self.env.add_java_param(
            {"JAVA_OPTS": self.settings.get("java-opts", None)})

        self.log.debug('JAVA_OPTS: "%s"', self.env.get("JAVA_OPTS"))

        if not is_gatling2(self.tool.version):  # cook prop file
            prop_lines = []
            for key in props:
                if key.startswith("gatling."):
                    prop_lines.append("%s = %s" % (key, props[key]))

            conf_dir = self.engine.create_artifact("conf", "")
            os.mkdir(conf_dir)
            with open(os.path.join(conf_dir, "gatling.conf"), 'w') as conf_file:
                conf_file.write('\n'.join(prop_lines))

            self.env.add_path({"GATLING_CONF": conf_dir})
Example #8
    def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
        if cwd is None:
            cwd = self.engine.default_cwd
        aliases = self.get_hostaliases()
        hosts_file = None
        if aliases:
            hosts_file = self.engine.create_artifact("hostaliases", "")
            with open(hosts_file, 'w') as fds:
                for key, value in iteritems(aliases):
                    fds.write("%s %s\n" % (key, value))

        environ = BetterDict()
        environ.merge(dict(os.environ))

        if aliases:
            environ["HOSTALIASES"] = hosts_file
        if env is not None:
            environ.merge(env)

        environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})

        environ = {key: environ[key] for key in environ.keys() if environ[key] is not None}

        return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
Example #9
class ConsolidatingAggregator(EngineModule, ResultsProvider):
    """

    :type underlings: list[bzt.modules.aggregator.ResultsProvider]
    """
    # FIXME: it was oscillating with remote test of 100 servers
    def __init__(self):
        EngineModule.__init__(self)
        ResultsProvider.__init__(self)
        self.generalize_labels = True
        self.ignored_labels = []
        self.underlings = []
        self.buffer = BetterDict()
        self.buffer_len = 2

    def prepare(self):
        """
        Read aggregation options
        """
        super(ConsolidatingAggregator, self).prepare()
        self.track_percentiles = self.settings.get("percentiles", self.track_percentiles)
        self.buffer_len = self.settings.get("buffer-seconds", self.buffer_len)
        self.ignored_labels = self.settings.get("ignore-labels", self.ignored_labels)
        self.generalize_labels = self.settings.get("generalize-labels", self.generalize_labels)

    def add_underling(self, underling):
        """
        Add source for aggregating

        :type underling: ResultsProvider
        """
        underling.track_percentiles = self.track_percentiles
        if isinstance(underling, ResultsReader):
            underling.ignored_labels = self.ignored_labels
            underling.generalize_labels = self.generalize_labels
            # underling.buffer_len = self.buffer_len  # NOTE: is it ok for underling to have the same buffer len?
        self.underlings.append(underling)

    def check(self):
        """
        Check if there is next aggregate data present

        :rtype: bool
        """
        for point in self.datapoints():
            self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])
        return super(ConsolidatingAggregator, self).check()

    def post_process(self):
        """
        Process all remaining aggregate data
        """
        super(ConsolidatingAggregator, self).post_process()
        for point in self.datapoints(True):
            self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])

    def _process_underlings(self, final_pass):
        for underling in self.underlings:
            for data in underling.datapoints(final_pass):
                tstamp = data[DataPoint.TIMESTAMP]
                if self.buffer:
                    mints = min(self.buffer.keys())
                    if tstamp < mints:
                        self.log.warning("Putting datapoint %s into %s", tstamp, mints)
                        data[DataPoint.TIMESTAMP] = mints
                        tstamp = mints
                # BetterDict.get() stores the default back into the dict
                # (setdefault semantics), so the appended list is kept
                self.buffer.get(tstamp, []).append(data)

    def _calculate_datapoints(self, final_pass=False):
        """
        Override ResultsProvider._calculate_datapoints

        """
        self._process_underlings(final_pass)

        self.log.debug("Consolidator buffer[%s]: %s", len(self.buffer), self.buffer.keys())
        if not self.buffer:
            return

        timestamps = sorted(self.buffer.keys())
        while timestamps and (final_pass or (timestamps[-1] >= timestamps[0] + self.buffer_len)):
            tstamp = timestamps.pop(0)
            self.log.debug("Merging into %s", tstamp)
            points_to_consolidate = self.buffer.pop(tstamp)
            point = DataPoint(tstamp, self.track_percentiles)
            for subresult in points_to_consolidate:
                self.log.debug("Merging %s", subresult[DataPoint.TIMESTAMP])
                point.merge_point(subresult)
            point.recalculate()
            yield point
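The `while` loop above flushes the oldest buffered second only once the buffer spans at least `buffer_len` seconds (or unconditionally on the final pass), which tolerates slightly out-of-order datapoints. A standalone sketch of that flushing policy, simplified so datapoints are plain numbers:

    def flush_ready(buffer, buffer_len, final_pass=False):
        """Yield (timestamp, datapoints) for every second old enough to consolidate."""
        timestamps = sorted(buffer)
        while timestamps and (final_pass or timestamps[-1] >= timestamps[0] + buffer_len):
            tstamp = timestamps.pop(0)
            yield tstamp, buffer.pop(tstamp)

    buffer = {100: [1], 101: [2, 3], 102: [4]}
    print(list(flush_ready(buffer, buffer_len=2)))        # [(100, [1])] -- 102 >= 100 + 2
    print(list(flush_ready(buffer, 2, final_pass=True)))  # [(101, [2, 3]), (102, [4])]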
Example #10
class ConsolidatingAggregator(Aggregator, ResultsProvider):
    """

    :type underlings: list[bzt.modules.aggregator.ResultsProvider]
    """

    # TODO: switch to underling-count-based completeness criteria
    def __init__(self):
        Aggregator.__init__(self, is_functional=False)
        ResultsProvider.__init__(self)
        self.generalize_labels = False
        self.ignored_labels = ["ignore"]
        self.underlings = []
        self.buffer = BetterDict()
        self.rtimes_len = 1000

    def prepare(self):
        """
        Read aggregation options
        """
        super(ConsolidatingAggregator, self).prepare()

        # make unique & sort
        self.track_percentiles = self.settings.get("percentiles",
                                                   self.track_percentiles)
        self.track_percentiles = list(set(self.track_percentiles))
        self.track_percentiles.sort()
        self.settings["percentiles"] = self.track_percentiles

        self.ignored_labels = self.settings.get("ignore-labels",
                                                self.ignored_labels)
        self.generalize_labels = self.settings.get("generalize-labels",
                                                   self.generalize_labels)

        self.min_buffer_len = dehumanize_time(
            self.settings.get("min-buffer-len", self.min_buffer_len))

        max_buffer_len = self.settings.get("max-buffer-len",
                                           self.max_buffer_len)
        try:
            self.max_buffer_len = dehumanize_time(max_buffer_len)
        except TaurusInternalException as exc:
            self.log.debug("Exception in dehumanize_time(%s): %s",
                           max_buffer_len, exc)
            raise TaurusConfigError("Wrong 'max-buffer-len' value: %s" %
                                    max_buffer_len)

        self.buffer_multiplier = self.settings.get("buffer-multiplier",
                                                   self.buffer_multiplier)

        count = len(self.track_percentiles)
        if count == 1:
            self.buffer_scale_idx = str(float(self.track_percentiles[0]))
        if count > 1:
            percentile = self.settings.get("buffer-scale-choice", 0.5)
            percentiles = [i / (count - 1.0) for i in range(count)]
            distances = [
                abs(percentile - percentiles[i]) for i in range(count)
            ]
            index_position = distances.index(min(distances))
            self.buffer_scale_idx = str(
                float(self.track_percentiles[index_position]))

        debug_str = 'Buffer scaling setup: percentile %s from %s selected'
        self.log.debug(debug_str, self.buffer_scale_idx,
                       self.track_percentiles)
        self.rtimes_len = self.settings.get("rtimes-len", self.rtimes_len)

    def add_underling(self, underling):
        """
        Add source for aggregating

        :type underling: ResultsProvider
        """
        underling.track_percentiles = self.track_percentiles
        if isinstance(underling, ResultsReader):
            underling.ignored_labels = self.ignored_labels
            underling.generalize_labels = self.generalize_labels
            underling.min_buffer_len = self.min_buffer_len
            underling.max_buffer_len = self.max_buffer_len
            underling.buffer_multiplier = self.buffer_multiplier
            underling.buffer_scale_idx = self.buffer_scale_idx
            underling.rtimes_len = self.rtimes_len

        self.underlings.append(underling)

    def check(self):
        """
        Check if there is next aggregate data present

        :rtype: bool
        """
        for point in self.datapoints():
            self.log.debug("Processed datapoint: %s/%s",
                           point[DataPoint.TIMESTAMP],
                           point[DataPoint.SOURCE_ID])
        return super(ConsolidatingAggregator, self).check()

    def post_process(self):
        """
        Process all remaining aggregate data
        """
        super(ConsolidatingAggregator, self).post_process()
        for point in self.datapoints(True):
            self.log.debug("Processed datapoint: %s/%s",
                           point[DataPoint.TIMESTAMP],
                           point[DataPoint.SOURCE_ID])

    def _process_underlings(self, final_pass):
        for underling in self.underlings:
            for data in underling.datapoints(final_pass):
                tstamp = data[DataPoint.TIMESTAMP]
                if self.buffer:
                    mints = min(self.buffer.keys())
                    if tstamp < mints:
                        self.log.debug("Putting datapoint %s into %s", tstamp,
                                       mints)
                        data[DataPoint.TIMESTAMP] = mints
                        tstamp = mints
                self.buffer.get(tstamp, [], force_set=True).append(data)

    def _calculate_datapoints(self, final_pass=False):
        """
        Override ResultsProvider._calculate_datapoints

        """
        self._process_underlings(final_pass)

        self.log.debug("Consolidator buffer[%s]: %s", len(self.buffer),
                       self.buffer.keys())
        if not self.buffer:
            return

        timestamps = sorted(self.buffer.keys())
        while timestamps and (final_pass or (timestamps[-1] >= timestamps[0] + self.buffer_len)):
            tstamp = timestamps.pop(0)
            self.log.debug("Merging into %s", tstamp)
            points_to_consolidate = self.buffer.pop(tstamp)
            point = DataPoint(tstamp, self.track_percentiles)
            for subresult in points_to_consolidate:
                self.log.debug("Merging %s", subresult[DataPoint.TIMESTAMP])
                point.merge_point(subresult)
            point.recalculate()
            yield point
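The `buffer-scale-choice` block in prepare() above picks, from the sorted `track_percentiles` list, the entry whose relative position is closest to the chosen fraction (the default 0.5 picks the middle percentile). A standalone sketch of that selection arithmetic:

    def pick_scale_percentile(percentiles, choice=0.5):
        """Pick the tracked percentile whose relative position is nearest `choice`."""
        count = len(percentiles)
        if count == 1:
            return float(percentiles[0])
        positions = [i / (count - 1.0) for i in range(count)]  # 0.0 .. 1.0
        distances = [abs(choice - pos) for pos in positions]
        return float(percentiles[distances.index(min(distances))])

    print(pick_scale_percentile([0.0, 50.0, 90.0, 95.0, 99.0]))       # 90.0 (middle of five)
    print(pick_scale_percentile([0.0, 50.0, 90.0, 95.0, 99.0], 1.0))  # 99.0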
Example #11
class ConsolidatingAggregator(EngineModule, ResultsProvider):
    """

    :type underlings: list[bzt.modules.aggregator.ResultsProvider]
    """

    # TODO: switch to underling-count-based completeness criteria
    def __init__(self):
        EngineModule.__init__(self)
        ResultsProvider.__init__(self)
        self.generalize_labels = False
        self.ignored_labels = []
        self.underlings = []
        self.buffer = BetterDict()

    def prepare(self):
        """
        Read aggregation options
        """
        super(ConsolidatingAggregator, self).prepare()

        # make unique & sort
        percentiles = self.settings.get("percentiles", self.track_percentiles)
        percentiles = list(set(percentiles))
        percentiles.sort()
        self.track_percentiles = percentiles
        self.settings['percentiles'] = percentiles

        self.ignored_labels = self.settings.get("ignore-labels", self.ignored_labels)
        self.generalize_labels = self.settings.get("generalize-labels", self.generalize_labels)

        self.min_buffer_len = dehumanize_time(self.settings.get("min-buffer-len", self.min_buffer_len))

        max_buffer_len = self.settings.get("max-buffer-len", self.max_buffer_len)
        try:  # for max_buffer_len == float('inf')
            self.max_buffer_len = dehumanize_time(max_buffer_len)
        except ValueError as verr:
            if str(verr).find('inf') != -1:
                self.max_buffer_len = max_buffer_len
            else:
                raise

        self.buffer_multiplier = self.settings.get("buffer-multiplier", self.buffer_multiplier)

        percentile = self.settings.get("buffer-scale-choice", 0.5)
        count = len(self.track_percentiles)
        if count == 1:
            self.buffer_scale_idx = str(float(self.track_percentiles[0]))
        if count > 1:
            percentiles = [i / (count - 1.0) for i in range(count)]
            distances = [abs(percentile - percentiles[i]) for i in range(count)]
            index_position = distances.index(min(distances))
            self.buffer_scale_idx = str(float(self.track_percentiles[index_position]))

        debug_str = 'Buffer scaling setup: percentile %s from %s selected'
        self.log.debug(debug_str, self.buffer_scale_idx, self.track_percentiles)

    def add_underling(self, underling):
        """
        Add source for aggregating

        :type underling: ResultsProvider
        """
        underling.track_percentiles = self.track_percentiles
        if isinstance(underling, ResultsReader):
            underling.ignored_labels = self.ignored_labels
            underling.generalize_labels = self.generalize_labels
            underling.min_buffer_len = self.min_buffer_len
            underling.max_buffer_len = self.max_buffer_len
            underling.buffer_multiplier = self.buffer_multiplier
            underling.buffer_scale_idx = self.buffer_scale_idx

        self.underlings.append(underling)

    def check(self):
        """
        Check if there is next aggregate data present

        :rtype: bool
        """
        for point in self.datapoints():
            self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])
        return super(ConsolidatingAggregator, self).check()

    def post_process(self):
        """
        Process all remaining aggregate data
        """
        super(ConsolidatingAggregator, self).post_process()
        for point in self.datapoints(True):
            self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])

    def _process_underlings(self, final_pass):
        for underling in self.underlings:
            for data in underling.datapoints(final_pass):
                tstamp = data[DataPoint.TIMESTAMP]
                if self.buffer:
                    mints = min(self.buffer.keys())
                    if tstamp < mints:
                        self.log.warning("Putting datapoint %s into %s", tstamp, mints)
                        data[DataPoint.TIMESTAMP] = mints
                        tstamp = mints
                self.buffer.get(tstamp, []).append(data)

    def _calculate_datapoints(self, final_pass=False):
        """
        Override ResultsProvider._calculate_datapoints

        """
        self._process_underlings(final_pass)

        self.log.debug("Consolidator buffer[%s]: %s", len(self.buffer), self.buffer.keys())
        if not self.buffer:
            return

        timestamps = sorted(self.buffer.keys())
        while timestamps and (final_pass or (timestamps[-1] >= timestamps[0] + self.buffer_len)):
            tstamp = timestamps.pop(0)
            self.log.debug("Merging into %s", tstamp)
            points_to_consolidate = self.buffer.pop(tstamp)
            point = DataPoint(tstamp, self.track_percentiles)
            for subresult in points_to_consolidate:
                self.log.debug("Merging %s", subresult[DataPoint.TIMESTAMP])
                point.merge_point(subresult)
            point.recalculate()
            yield point
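The try/except around dehumanize_time above exists because `max-buffer-len` may legitimately be infinity, which a duration parser cannot convert. A minimal sketch of a duration parser with that escape hatch; `parse_duration` is illustrative, not bzt's actual `dehumanize_time`:

    import re

    def parse_duration(value):
        """Convert '10s' / '2m' / '1h' / bare numbers to seconds; pass infinity through."""
        if value == float('inf') or str(value).strip() == 'inf':
            return float('inf')
        match = re.fullmatch(r'(\d+(?:\.\d+)?)\s*([smh]?)', str(value).strip())
        if not match:
            raise ValueError("Can't parse duration: %s" % value)
        number, unit = float(match.group(1)), match.group(2)
        return number * {'': 1, 's': 1, 'm': 60, 'h': 3600}[unit]

    print(parse_duration("90s"))         # 90.0
    print(parse_duration("2m"))          # 120.0
    print(parse_duration(float('inf')))  # inf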
Example #12
class ConsolidatingAggregator(EngineModule, ResultsProvider):
    """

    :type underlings: list[bzt.modules.aggregator.ResultsProvider]
    """

    # TODO: switch to underling-count-based completeness criteria
    def __init__(self):
        EngineModule.__init__(self)
        ResultsProvider.__init__(self)
        self.generalize_labels = False
        self.ignored_labels = []
        self.underlings = []
        self.buffer = BetterDict()
        self.buffer_len = 2

    def prepare(self):
        """
        Read aggregation options
        """
        super(ConsolidatingAggregator, self).prepare()
        self.track_percentiles = self.settings.get("percentiles",
                                                   self.track_percentiles)
        self.buffer_len = self.settings.get("buffer-seconds", self.buffer_len)
        self.ignored_labels = self.settings.get("ignore-labels",
                                                self.ignored_labels)
        self.generalize_labels = self.settings.get("generalize-labels",
                                                   self.generalize_labels)

    def add_underling(self, underling):
        """
        Add source for aggregating

        :type underling: ResultsProvider
        """
        underling.track_percentiles = self.track_percentiles
        if isinstance(underling, ResultsReader):
            underling.ignored_labels = self.ignored_labels
            underling.generalize_labels = self.generalize_labels
            # underling.buffer_len = self.buffer_len  # NOTE: is it ok for underling to have the same buffer len?
        self.underlings.append(underling)

    def check(self):
        """
        Check if there is next aggregate data present

        :rtype: bool
        """
        for point in self.datapoints():
            self.log.debug("Processed datapoint: %s/%s",
                           point[DataPoint.TIMESTAMP],
                           point[DataPoint.SOURCE_ID])
        return super(ConsolidatingAggregator, self).check()

    def post_process(self):
        """
        Process all remaining aggregate data
        """
        super(ConsolidatingAggregator, self).post_process()
        for point in self.datapoints(True):
            self.log.debug("Processed datapoint: %s/%s",
                           point[DataPoint.TIMESTAMP],
                           point[DataPoint.SOURCE_ID])

    def _process_underlings(self, final_pass):
        for underling in self.underlings:
            for data in underling.datapoints(final_pass):
                tstamp = data[DataPoint.TIMESTAMP]
                if self.buffer:
                    mints = min(self.buffer.keys())
                    if tstamp < mints:
                        self.log.warning("Putting datapoint %s into %s",
                                         tstamp, mints)
                        data[DataPoint.TIMESTAMP] = mints
                        tstamp = mints
                self.buffer.get(tstamp, []).append(data)

    def _calculate_datapoints(self, final_pass=False):
        """
        Override ResultsProvider._calculate_datapoints

        """
        self._process_underlings(final_pass)

        self.log.debug("Consolidator buffer[%s]: %s", len(self.buffer),
                       self.buffer.keys())
        if not self.buffer:
            return

        timestamps = sorted(self.buffer.keys())
        while timestamps and (final_pass or (timestamps[-1] >= timestamps[0] + self.buffer_len)):
            tstamp = timestamps.pop(0)
            self.log.debug("Merging into %s", tstamp)
            points_to_consolidate = self.buffer.pop(tstamp)
            point = DataPoint(tstamp, self.track_percentiles)
            for subresult in points_to_consolidate:
                self.log.debug("Merging %s", subresult[DataPoint.TIMESTAMP])
                point.merge_point(subresult)
            point.recalculate()
            yield point
Example #13
class PipInstaller(Service):
    def __init__(self, packages=None, temp_flag=True):
        super(PipInstaller, self).__init__()
        self.packages = packages or []
        self.versions = BetterDict()
        self.engine = None
        self.temp = temp_flag
        self.target_dir = None
        self.interpreter = sys.executable
        self.pip_cmd = [self.interpreter, "-m", "pip"]

    def _check_pip(self):
        cmdline = self.pip_cmd + ["--version"]
        try:
            exec_and_communicate(cmdline)
        except TaurusCalledProcessError as exc:
            self.log.debug(exc)
            raise TaurusInternalException(
                "pip module not found for interpreter %s" % self.interpreter)

    def _get_installed(self):
        cmdline = self.pip_cmd + ["list"]
        out, _ = exec_and_communicate(cmdline)
        lines = out.split('\n')[2:-1]  # drop the two header lines and the trailing empty line
        return {line.split(' ')[0]: line.strip().split(' ')[-1] for line in lines}

    def _missed(self, packages):
        installed = self._get_installed()
        missed = []
        for package in packages:
            if package not in installed or (package in self.versions and installed[package] != self.versions[package]):
                missed.append(package)
        return missed

    def _convert_config_versions(self):
        """
        extract from packages config:
          packages:
            - one
            - two==0.0.0
            - name: three
              version: 0.0.0
        and add to self.packages and self.versions
        """
        packages_list = self.parameters.get("packages", None)
        if not packages_list:
            return

        for package_data in packages_list:
            package, version = None, None
            if isinstance(package_data, dict):
                package = package_data['name']
                version = package_data.get("version", None)
            elif isinstance(package_data, str):
                package_params = package_data.split("==")
                package = package_params[0]
                version = package_params[1] if len(package_params) > 1 else None

            self.packages.append(package)
            if version:
                self.versions[package] = version

    def prepare_pip(self):
        """
        pip-installer expects the following definition:
        - service pip-install
          temp: false   # install to ~/.bzt instead of artifacts dir
          packages:
          - first_pkg
          - second_pkg
        """
        self._check_pip()

        self._convert_config_versions()
        if not self.packages:
            return

        # install into artifacts dir if temp, otherwise into .bzt
        self.temp = self.settings.get("temp", self.temp)
        self.temp = self.parameters.get("temp", self.temp)

        self.target_dir = self.engine.temp_pythonpath if self.temp else self.engine.user_pythonpath

        if not os.path.exists(self.target_dir):
            os.makedirs(get_full_path(self.target_dir), exist_ok=True)

    def prepare(self):
        self.prepare_pip()
        if not self.all_packages_installed():
            self.install()

    def all_packages_installed(self):
        self.packages = self._missed(self.packages)
        self.versions = {
            package: self.versions[package]
            for package in self.versions.keys() if package in self.packages
        }
        return not self.packages

    def install(self):
        if not self.packages:
            self.log.debug("Nothing to install")
            return
        cmdline = self.pip_cmd + ["install", "-t", self.target_dir]
        for package in self.packages:
            version = self.versions.get(package, None)
            cmdline += [f"{package}=={version}"] if version else [package]
        cmdline += ["--upgrade"]
        self.log.debug("pip-installer cmdline: '%s'" % ' '.join(cmdline))
        try:
            out, err = exec_and_communicate(cmdline)
        except TaurusCalledProcessError as exc:
            self.log.debug(exc)
            for line in exc.output.split('\n'):
                if line.startswith("ERROR"):
                    self.log.error(" ".join(line.split(" ")[1:]))
            return
        if "Successfully installed" in out:
            self.log.info(out.split("\n")[-2])
            for err_line in err.split("\n"):
                if err_line.startswith('WARNING'):
                    self.log.warning(" ".join(err_line.split(" ")[1:]))
                if err_line.startswith('ERROR'):
                    self.log.error(" ".join(err_line.split(" ")[1:]))
        self.log.debug("pip-installer stdout: \n%s" % out)
        if err:
            self.log.debug("pip-installer stderr:\n%s" % err)

    def get_version(self, package):
        installed = self._get_installed()
        return installed[package]

    def post_process(self):
        # removing the temp dir may be forbidden on Windows while the tool is still running
        if self.packages and self.temp and not is_windows() and os.path.exists(self.target_dir):
            self.log.debug("remove packages: %s" % self.packages)
            # this removes the whole directory content, not only self.packages
            shutil.rmtree(self.target_dir)
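`_convert_config_versions` above accepts both `name==version` strings and `{name, version}` mappings. A standalone sketch of the same normalization, as a plain function outside the Service class:

    def normalize_packages(packages_list):
        """Split mixed package specs into names plus a name->version map."""
        names, versions = [], {}
        for spec in packages_list:
            if isinstance(spec, dict):
                name, version = spec['name'], spec.get('version')
            else:
                name, _, version = str(spec).partition('==')
            names.append(name)
            if version:
                versions[name] = version
        return names, versions

    print(normalize_packages(["one", "two==0.0.0", {"name": "three", "version": "0.0.0"}]))
    # (['one', 'two', 'three'], {'two': '0.0.0', 'three': '0.0.0'})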