Example #1
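A test case that creates a JTLReader per test via a configure() helper and closes its csvreader and errors_reader in tearDown(); the tests count datapoints for transaction-controller and tab-separated JTLs, filter non-ASCII labels, and parse double-quoted fields.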
class TestJTLReader(BZTestCase):
    def setUp(self):
        super(TestJTLReader, self).setUp()
        self.obj = None

    def configure(self, jtl_file):
        self.obj = JTLReader(jtl_file, logging.getLogger(''))

    def tearDown(self):
        if self.obj:
            close_reader_file(self.obj.csvreader)
            close_reader_file(self.obj.errors_reader)
        super(TestJTLReader, self).tearDown()

    def test_tranctl_jtl(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/tranctl.jtl")
        values = [x for x in self.obj.datapoints(final_pass=True)]
        self.assertEquals(1, len(values))

    def test_tabs_jtl(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/tabs.jtl")
        values = [x for x in self.obj.datapoints(final_pass=True)]
        self.assertEquals(4, len(values))

    def test_reader_unicode(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/unicode.jtl")
        self.obj.ignored_labels = [u"Тест.Эхо"]
        for point in self.obj.datapoints(final_pass=True):
            cumulative = point[DataPoint.CUMULATIVE]
            self.assertIn(u"САП.АутентифицироватьРасш", cumulative)
            self.assertNotIn(u"Тест.Эхо", cumulative)

    def test_jtl_doublequoting(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/doublequoting.jtl")
        list(self.obj.datapoints(final_pass=True))
Example #2
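An executor test for a JTL quoting issue: it stubs bzt.utils.exec_and_communicate, temporarily points sys.executable at a Locust mock, runs the prepare/startup/check/shutdown/post_process lifecycle, and drains the bundled locust-kpi.jtl fixture through a JTLReader.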
    def test_jtl_quoting_issue(self):
        def exec_and_communicate(*args, **kwargs):
            return "", ""

        self.configure({"execution": {
            "concurrency": 1,
            "iterations": 1,
            "scenario": {
                "default-address": "http://httpbin.org/status/503",
                "requests": [
                    "/"
                ]
            }
        }})
        tmp_aec, tmp_ex = bzt.utils.exec_and_communicate, sys.executable
        try:
            bzt.utils.exec_and_communicate = exec_and_communicate
            self.obj.prepare()
            sys.executable = RESOURCES_DIR + "locust/locust-mock" + EXE_SUFFIX
            self.obj.startup()
        finally:
            bzt.utils.exec_and_communicate = tmp_aec
            sys.executable = tmp_ex

        while not self.obj.check():
            time.sleep(self.obj.engine.check_interval)
        self.obj.shutdown()
        self.obj.post_process()

        kpi_path = RESOURCES_DIR + "locust/locust-kpi.jtl"

        reader = JTLReader(kpi_path, self.obj.log)
        list(reader.datapoints())
Example #3
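A Locust executor prepare(): in master mode it requires a slaves count and attaches a SlavesReader to an .ldjson artifact, otherwise it creates a kpi.jtl read by a JTLReader; either way the reader is registered with a ConsolidatingAggregator.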
    def prepare(self):
        self.install_required_tools()
        self.scenario = self.get_scenario()
        self.__setup_script()

        self.is_master = self.execution.get("master", self.is_master)
        if self.is_master:
            count_error = TaurusConfigError(
                "Slaves count required when starting in master mode")
            slaves = self.execution.get("slaves", count_error)
            self.expected_slaves = int(slaves)

        self.engine.existing_artifact(self.script)

        if self.is_master:
            self.slaves_ldjson = self.engine.create_artifact(
                "locust-slaves", ".ldjson")
            self.reader = SlavesReader(self.slaves_ldjson,
                                       self.expected_slaves, self.log)
        else:
            self.kpi_jtl = self.engine.create_artifact("kpi", ".jtl")
            self.reader = JTLReader(self.kpi_jtl, self.log, None)

        if isinstance(self.engine.aggregator, ConsolidatingAggregator):
            self.engine.aggregator.add_underling(self.reader)
Example #4
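A reader factory that sniffs the first chunk of the results file and dispatches on the header: ApacheBenchmark TSV, XML JTL, PBench, Gatling log, CSV JTL (recognized by the timestamp and elapsed columns), or Grinder log.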
    def _get_reader(self):
        with open(self.data_file) as fhd:
            header = fhd.readline(2048).strip()  # just header chunk of file

        # TODO: detect CSV dialect for JTLs

        if header.startswith(self.AB_HEADER):
            reader = TSVDataReader(self.data_file, self.log)
            reader.url_label = "N/A"
            return reader
        elif header.startswith("<?xml"):
            return XMLJTLReader(self.data_file, self.log)
        elif self.PBENCH_FORMAT.match(header):
            return PBenchKPIReader(self.data_file, self.log, self.errors_file)
        elif header.startswith("RUN\t") or "\tRUN\t" in header:
            return GatlingLogReader(self.data_file, self.log, None)
        elif "timestamp" in header.lower() and "elapsed" in header.lower():
            return JTLReader(self.data_file, self.log, self.errors_file)
        elif "worker process" in header.lower() and header.startswith(
                "worker."):
            return GrinderLogReader(self.data_file, self.log)
        else:
            self.log.info("Header line was: %s", header)
            raise TaurusInternalException(
                "Unable to detect results format for: %s" % self.data_file)
Example #5
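A variant of the Locust prepare() from Example #3 that validates the locustfile itself and raises plain ValueError instead of TaurusConfigError.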
    def prepare(self):
        self.__check_installed()
        self.locustfile = self.get_locust_file()
        if not self.locustfile or not os.path.exists(self.locustfile):
            raise ValueError("Locust file not found: %s" % self.locustfile)

        self.is_master = self.execution.get("master", self.is_master)
        if self.is_master:
            slaves = self.execution.get(
                "slaves",
                ValueError(
                    "Slaves count required when starting in master mode"))
            self.expected_slaves = int(slaves)

        self.engine.existing_artifact(self.locustfile)

        if self.is_master:
            self.slaves_ldjson = self.engine.create_artifact(
                "locust-slaves", ".ldjson")
            self.reader = SlavesReader(self.slaves_ldjson,
                                       self.expected_slaves, self.log)
        else:
            self.kpi_jtl = self.engine.create_artifact("kpi", ".jtl")
            self.reader = JTLReader(self.kpi_jtl, self.log, None)

        if isinstance(self.engine.aggregator, ConsolidatingAggregator):
            self.engine.aggregator.add_underling(self.reader)
Example #6
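A Selenium executor prepare() that chooses a Nose or JUnit runner by script extension, fills in default artifact paths on the runner config, and feeds the KPI CSV plus error XML to a JTLReader.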
    def prepare(self):
        self.set_virtual_display()
        self.scenario = self.get_scenario()
        self._verify_script()
        self.kpi_file = self.engine.create_artifact("selenium_tests_report", ".csv")
        self.err_jtl = self.engine.create_artifact("selenium_tests_err", ".xml")
        script_type = self.detect_script_type(self.scenario.get(Scenario.SCRIPT))

        runner_config = BetterDict()

        if script_type == ".py":
            runner_class = NoseTester
            runner_config.merge(self.settings.get("selenium-tools").get("nose"))
        else:  # script_type == ".jar" or script_type == ".java":
            runner_class = JUnitTester
            runner_config.merge(self.settings.get("selenium-tools").get("junit"))
            runner_config['props-file'] = self.engine.create_artifact("customrunner", ".properties")

        runner_config["script-type"] = script_type
        self.runner_working_dir = self.engine.create_artifact(runner_config.get("working-dir", "classes"), "")
        runner_config["working-dir"] = self.runner_working_dir
        runner_config.get("artifacts-dir", self.engine.artifacts_dir)
        runner_config.get("working-dir", self.runner_working_dir)
        runner_config.get("report-file", self.kpi_file)
        runner_config.get("err-file", self.err_jtl)
        runner_config.get("stdout", self.engine.create_artifact("junit", ".out"))
        runner_config.get("stderr", self.engine.create_artifact("junit", ".err"))

        self._cp_resource_files(self.runner_working_dir)

        self.runner = runner_class(runner_config, self.scenario, self.get_load(), self.log)
        self.runner.prepare()
        self.reader = JTLReader(self.kpi_file, self.log, self.err_jtl)
        if isinstance(self.engine.aggregator, ConsolidatingAggregator):
            self.engine.aggregator.add_underling(self.reader)
Example #7
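A Locust prepare() that additionally captures stdout/stderr, puts the Taurus directory on PYTHONPATH for helper tools, and passes the reader's input file to the tool through the SLAVES_LDJSON or JTL environment variable.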
    def prepare(self):
        self.stdout = open(self.engine.create_artifact("locust", ".out"), 'w')
        self.stderr = open(self.engine.create_artifact("locust", ".err"), 'w')

        self.install_required_tools()
        self.scenario = self.get_scenario()
        self.__setup_script()
        self.engine.existing_artifact(self.script)

        # path to taurus dir. It's necessary for bzt usage inside tools/helpers
        self.env.add_path({"PYTHONPATH": get_full_path(__file__, step_up=3)})

        self.is_master = self.execution.get("master", self.is_master)

        if self.is_master:
            count_error = TaurusConfigError(
                "Slaves count required when starting in master mode")
            self.expected_slaves = int(
                self.execution.get("slaves", count_error))
            slaves_ldjson = self.engine.create_artifact(
                "locust-slaves", ".ldjson")
            self.reader = SlavesReader(slaves_ldjson, self.expected_slaves,
                                       self.log)
            self.env.set({"SLAVES_LDJSON": slaves_ldjson})
        else:
            kpi_jtl = self.engine.create_artifact("kpi", ".jtl")
            self.reader = JTLReader(kpi_jtl, self.log)
            self.env.set({"JTL": kpi_jtl})

        if isinstance(self.engine.aggregator, ConsolidatingAggregator):
            self.engine.aggregator.add_underling(self.reader)
Example #8
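A Selenium prepare() that also configures a virtual display from the virtual-display settings (warned about and skipped on Windows).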
    def prepare(self):
        """
        1) Locate script or folder
        2) detect script type
        3) create runner instance, prepare runner
        """
        self.scenario = self.get_scenario()
        self._verify_script()
        self.kpi_file = self.engine.create_artifact("selenium_tests_report",
                                                    ".csv")
        self.err_jtl = self.engine.create_artifact("selenium_tests_err",
                                                   ".xml")
        script_type = self.detect_script_type(
            self.scenario.get(Scenario.SCRIPT))

        if script_type == ".py":
            self.runner = NoseTester
            runner_config = self.settings.get("selenium-tools").get("nose")
        elif script_type == ".jar" or script_type == ".java":
            self.runner = JunitTester
            runner_config = self.settings.get("selenium-tools").get("junit")
        else:
            raise ValueError("Unsupported script type: %s" % script_type)

        runner_config["script-type"] = script_type
        self.runner_working_dir = self.engine.create_artifact(
            runner_config.get("working-dir", "classes"), "")
        runner_config["working-dir"] = self.runner_working_dir
        runner_config.get("artifacts-dir", self.engine.artifacts_dir)
        runner_config.get("working-dir", self.runner_working_dir)
        runner_config.get("report-file", self.kpi_file)
        runner_config.get("err-file", self.err_jtl)
        runner_config.get("stdout",
                          self.engine.create_artifact("junit", ".out"))
        runner_config.get("stderr",
                          self.engine.create_artifact("junit", ".err"))

        self._cp_resource_files(self.runner_working_dir)

        self.runner = self.runner(runner_config, self.scenario, self.log)
        self.runner.prepare()
        self.reader = JTLReader(self.kpi_file, self.log, self.err_jtl)
        if isinstance(self.engine.aggregator, ConsolidatingAggregator):
            self.engine.aggregator.add_underling(self.reader)

        display_conf = self.settings.get("virtual-display")
        if display_conf:
            if is_windows():
                self.log.warning(
                    "Cannot have virtual display on Windows, ignoring")
            else:
                width = display_conf.get("width", 1024)
                height = display_conf.get("height", 768)
                self.virtual_display = Display(size=(width, height))
Example #9
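An integration test that runs a full executor lifecycle and then re-reads the produced kpi.jtl with a fresh JTLReader to verify that quoted samples parse.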
    def test_jtl_quoting_issue(self):
        self.obj.execution.merge({
            "concurrency": 1,
            "iterations": 1,
            "scenario": {
                "default-address": "http://httpbin.org/status/503",
                "requests": ["/"]
            }
        })
        self.obj.prepare()
        self.obj.startup()
        while not self.obj.check():
            time.sleep(self.obj.engine.check_interval)
        self.obj.shutdown()
        self.obj.post_process()

        kpi_path = os.path.join(self.obj.engine.artifacts_dir, "kpi.jtl")
        self.assertTrue(os.path.exists(kpi_path))

        reader = JTLReader(kpi_path, self.obj.log, None)
        for point in reader.datapoints():
            pass
Example #10
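The same quoting regression as Example #9, written against a configure() helper and the two-argument JTLReader constructor.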
    def test_jtl_quoting_issue(self):
        self.configure({"execution": {
            "concurrency": 1,
            "iterations": 1,
            "scenario": {
                "default-address": "http://httpbin.org/status/503",
                "requests": [
                    "/"
                ]
            }
        }})
        self.obj.prepare()
        self.obj.startup()
        while not self.obj.check():
            time.sleep(self.obj.engine.check_interval)
        self.obj.shutdown()
        self.obj.post_process()

        kpi_path = os.path.join(self.obj.engine.artifacts_dir, "kpi.jtl")
        self.assertTrue(os.path.exists(kpi_path))

        reader = JTLReader(kpi_path, self.obj.log)
        list(reader.datapoints())
Example #11
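A Selenium prepare() that can also generate the test script from the scenario's requests before selecting the runner.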
    def prepare(self):
        """
        1) Locate script or folder
        2) detect script type
        3) create runner instance, prepare runner
        """
        self.scenario = self.get_scenario()
        if "requests" in self.scenario:
            if self.scenario.get("requests"):
                self.scenario["script"] = self.__tests_from_requests()
            else:
                raise RuntimeError(
                    "Nothing to test, no requests were provided in scenario")
        self.kpi_file = self.engine.create_artifact("selenium_tests_report",
                                                    ".csv")
        self.err_jtl = self.engine.create_artifact("selenium_tests_err",
                                                   ".xml")
        script_type = self.detect_script_type(self.scenario.get("script"))
        runner_config = BetterDict()

        if script_type == ".py":
            self.runner = NoseTester
            runner_config = self.settings.get("selenium-tools").get("nose")

        elif script_type == ".jar" or script_type == ".java":
            self.runner = JunitTester
            runner_config = self.settings.get("selenium-tools").get("junit")

        runner_config["script-type"] = script_type
        self.runner_working_dir = self.engine.create_artifact(
            runner_config.get("working-dir", "classes"), "")
        runner_config["working-dir"] = self.runner_working_dir
        runner_config.get("artifacts-dir", self.engine.artifacts_dir)
        runner_config.get("working-dir", self.runner_working_dir)
        runner_config.get("report-file", self.kpi_file)
        runner_config.get("err-file", self.err_jtl)
        runner_config.get("stdout",
                          self.engine.create_artifact("junit", ".out"))
        runner_config.get("stderr",
                          self.engine.create_artifact("junit", ".err"))

        self._cp_resource_files(self.runner_working_dir)

        self.runner = self.runner(runner_config, self.scenario, self.log)
        self.runner.prepare()
        self.reader = JTLReader(self.kpi_file, self.log, self.err_jtl)
        if isinstance(self.engine.aggregator, ConsolidatingAggregator):
            self.engine.aggregator.add_underling(self.reader)
Example #12
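An older Selenium prepare() where detect_script_type() also reports whether the script is a folder, so resources are copied with copytree() or copy2(); the JTLReader gets no errors file.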
    def prepare(self):
        """
        1) Locate script or folder
        2) detect script type
        3) create runner instance, prepare runner
        """
        scenario = self.get_scenario()
        self.kpi_file = self.engine.create_artifact("selenium_tests_report",
                                                    ".csv")
        script_type, script_is_folder = self.detect_script_type(
            scenario.get("script"))
        runner_config = BetterDict()

        if script_type == ".py":
            self.runner = NoseTester
            runner_config = self.settings.get("selenium-tools").get("nose")

        elif script_type == ".jar" or script_type == ".java":
            self.runner = JunitTester
            runner_config = self.settings.get("selenium-tools").get("junit")

        runner_config["script-type"] = script_type
        runner_working_dir = self.engine.create_artifact(
            runner_config.get("working-dir", "classes"), "")
        runner_config["working-dir"] = runner_working_dir
        runner_config.get("artifacts-dir", self.engine.artifacts_dir)
        runner_config.get("working-dir", runner_working_dir)
        runner_config.get("report-file", self.kpi_file)
        runner_config.get("stdout",
                          self.engine.create_artifact("junit", ".out"))
        runner_config.get("stderr",
                          self.engine.create_artifact("junit", ".err"))

        if Scenario.SCRIPT in scenario:
            if script_is_folder:
                shutil.copytree(scenario.get("script"), runner_working_dir)
            else:
                os.makedirs(runner_working_dir)
                shutil.copy2(scenario.get("script"), runner_working_dir)

        self.runner = self.runner(runner_config, scenario, self.log)
        self.runner.prepare()
        self.reader = JTLReader(self.kpi_file, self.log, None)
        if isinstance(self.engine.aggregator, ConsolidatingAggregator):
            self.engine.aggregator.add_underling(self.reader)
Example #13
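A check() implementation that tails the tool's log and, for every announced worker, creates a JTLReader (or a FuncSamplesReader in functional mode) and registers it with the matching aggregator.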
    def check(self):
        for line in self._tailer.get_lines():
            if "Adding worker" in line:
                marker = "results="
                pos = line.index(marker)
                fname = line[pos + len(marker):].strip()
                self.log.debug("Adding result reader for %s", fname)
                if not self.engine.is_functional_mode():
                    reader = JTLReader(fname, self.log)
                    if isinstance(self.engine.aggregator, ConsolidatingAggregator):
                        self.engine.aggregator.add_underling(reader)
                else:
                    reader = FuncSamplesReader(self.report_file, self.engine, self.log)
                    if isinstance(self.engine.aggregator, FunctionalAggregator):
                        self.engine.aggregator.add_underling(reader)
                self._readers.append(reader)

        return super(ApiritifNoseExecutor, self).check()
Example #14
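A slimmer format sniffer than Example #4 that also recognizes Vegeta CSV output by its leading 19-digit nanosecond timestamp.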
    def _get_reader(self):
        with open(self.data_file) as fhd:
            header = fhd.readline(2048).strip()  # just header chunk of file

        if header.startswith(self.AB_HEADER):
            reader = TSVDataReader(self.data_file, self.log)
            reader.url_label = "N/A"
            return reader
        elif header.startswith("<?xml"):
            return XMLJTLReader(self.data_file, self.log)
        elif header.startswith("RUN\t") or "\tRUN\t" in header:
            return GatlingLogReader(self.data_file, self.log, None)
        elif "timestamp" in header.lower() and "elapsed" in header.lower():
            return JTLReader(self.data_file, self.log, self.errors_file)
        elif re.match("^[0-9]{19},", header):
            # Vegeta CSV does not have a header, every line starts with a timestamp in nanoseconds
            return VegetaLogReader(self.data_file, self.log)
        else:
            self.log.info("Header line was: %s", header)
            raise TaurusInternalException("Unable to detect results format for: %s" % self.data_file)
Example #15
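A standalone check that the tab-separated fixture yields four datapoints.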
    def test_tabs_jtl(self):
        obj = JTLReader(RESOURCES_DIR + "/jmeter/jtl/tabs.jtl", logging.getLogger(''), None)
        values = [x for x in obj.datapoints(True)]
        self.assertEquals(4, len(values))
Example #16
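The test class from Example #1 extended with regressions for a CSV-sniffer unicode crash, standard-deviation performance on a slow fixture, and JSON round-tripping of a KPISet (including the Python 2 view/iter items).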
class TestJTLReader(BZTestCase):
    def setUp(self):
        super(TestJTLReader, self).setUp()
        self.obj = None

    def configure(self, jtl_file):
        self.obj = JTLReader(jtl_file, ROOT_LOGGER)

    def tearDown(self):
        if self.obj:
            close_reader_file(self.obj.csvreader)
            close_reader_file(self.obj.errors_reader)
        super(TestJTLReader, self).tearDown()

    def test_tranctl_jtl(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/tranctl.jtl")
        values = [x for x in self.obj.datapoints(final_pass=True)]
        self.assertEquals(1, len(values))

    def test_tabs_jtl(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/tabs.jtl")
        values = [x for x in self.obj.datapoints(final_pass=True)]
        self.assertEquals(4, len(values))

    def test_reader_unicode(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/unicode.jtl")
        self.obj.ignored_labels = [u"Тест.Эхо"]
        for point in self.obj.datapoints(final_pass=True):
            cumulative = point[DataPoint.CUMULATIVE]
            self.assertIn(u"САП.АутентифицироватьРасш", cumulative)
            self.assertNotIn(u"Тест.Эхо", cumulative)

    def test_jtl_doublequoting(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/doublequoting.jtl")
        list(self.obj.datapoints(final_pass=True))

    def test_jtl_csv_sniffer_unicode_crash(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/quote-guessing-crash.jtl")
        list(self.obj.datapoints(final_pass=True))

    def test_stdev_performance(self):
        start = time.time()
        self.configure(RESOURCES_DIR + "/jmeter/jtl/slow-stdev.jtl")
        res = list(self.obj.datapoints(final_pass=True))
        lst_json = to_json(res)

        self.assertNotIn('"perc": {},', lst_json)

        elapsed = time.time() - start
        ROOT_LOGGER.debug("Elapsed/per datapoint: %s / %s", elapsed, elapsed / len(res))
        # self.assertLess(elapsed, len(res))  # less than 1 datapoint per sec is a no-go
        exp = [2.2144798867972773,
               0.7207704268609725,
               0.606834452578833,
               0.8284089170237546,
               0.5858142211763572,
               0.622922628329711,
               0.5529488620851849,
               0.6933748292117727,
               0.4876162181858197,
               0.42471180222446503,
               0.2512251128133865]
        self.assertEqual(exp, [x[DataPoint.CURRENT][''][KPISet.STDEV_RESP_TIME] for x in res])

    def test_kpiset_trapped_getitem(self):
        def new():
            subj = KPISet(perc_levels=(100.0,))
            subj[KPISet.RESP_TIMES].add(0.1)
            subj[KPISet.RESP_TIMES].add(0.01)
            subj[KPISet.RESP_TIMES].add(0.001)
            subj.recalculate()
            return subj

        def enc_dec_iter(vals):
            vals = list(vals)
            dct = {x[0]: x[1] for x in vals}
            jsoned = to_json(dct)
            return json.loads(jsoned)

        exp = {u'avg_ct': 0,
               u'avg_lt': 0,
               u'avg_rt': 0,
               u'bytes': 0,
               u'concurrency': 0,
               u'errors': [],
               u'fail': 0,
               u'perc': {u'100.0': 0.1},
               u'rc': {},
               u'rt': {u'0.001': 1, u'0.01': 1, u'0.1': 1},
               u'stdev_rt': 0.058 if PY2 else 0.05802585630561603,
               u'succ': 0,
               u'throughput': 0}

        self.assertEqual(exp, enc_dec_iter(new().items()))
        if PY2:
            self.assertEqual(exp, enc_dec_iter(new().viewitems()))
            self.assertEqual(exp, enc_dec_iter(new().iteritems()))
        self.assertEqual('{"100.0": 0.1}', to_json(new().get(KPISet.PERCENTILES), indent=None))
Example #17
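A minimal hook that wraps a report file in a JTLReader and registers it as an underling.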
    def register_file(self, report_filename):
        reader = JTLReader(report_filename, self.log)
        self.add_underling(reader)
Example #18
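A custom executor prepare() built on the reporting_setup() helper.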
    def prepare(self):
        super(MyCustomExecutor, self).prepare()
        self.reporting_setup(suffix='.csv')
        self.reader = JTLReader(self.report_file, self.log)
        if isinstance(self.engine.aggregator, ConsolidatingAggregator):
            self.engine.aggregator.add_underling(self.reader)
Example #19
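A configure() helper constructing the reader with the root logger via logging.getLogger('').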
    def configure(self, jtl_file):
        self.obj = JTLReader(jtl_file, logging.getLogger(''))
Example #20
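The same helper using the ROOT_LOGGER constant instead.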
    def configure(self, jtl_file):
        self.obj = JTLReader(jtl_file, ROOT_LOGGER)
Example #21
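A register_file() variant that records both the filename and the reader.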
    def register_file(self, report_filename):
        self.filenames.append(report_filename)
        reader = JTLReader(report_filename, self.log)
        self.readers.append(reader)
Example #22
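A Python-3-only snapshot of the extended test class from Example #16, comparing standard deviations rounded to 14 decimal places.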
class TestJTLReader(BZTestCase):
    def setUp(self):
        super(TestJTLReader, self).setUp()
        self.obj = None

    def configure(self, jtl_file):
        self.obj = JTLReader(jtl_file, ROOT_LOGGER)

    def tearDown(self):
        if self.obj:
            close_reader_file(self.obj.csvreader)
            close_reader_file(self.obj.errors_reader)
        super(TestJTLReader, self).tearDown()

    def test_tranctl_jtl(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/tranctl.jtl")
        values = [x for x in self.obj.datapoints(final_pass=True)]
        self.assertEquals(1, len(values))

    def test_tabs_jtl(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/tabs.jtl")
        values = [x for x in self.obj.datapoints(final_pass=True)]
        self.assertEquals(4, len(values))

    def test_reader_unicode(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/unicode.jtl")
        self.obj.ignored_labels = [u"Тест.Эхо"]
        for point in self.obj.datapoints(final_pass=True):
            cumulative = point[DataPoint.CUMULATIVE]
            self.assertIn(u"САП.АутентифицироватьРасш", cumulative)
            self.assertNotIn(u"Тест.Эхо", cumulative)

    def test_jtl_doublequoting(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/doublequoting.jtl")
        list(self.obj.datapoints(final_pass=True))

    def test_jtl_csv_sniffer_unicode_crash(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/quote-guessing-crash.jtl")
        list(self.obj.datapoints(final_pass=True))

    def test_stdev_performance(self):
        start = time.time()
        self.configure(RESOURCES_DIR + "jmeter/jtl/slow-stdev.jtl")
        res = list(self.obj.datapoints(final_pass=True))
        lst_json = to_json(res)

        self.assertNotIn('"perc": {},', lst_json)

        elapsed = time.time() - start
        ROOT_LOGGER.debug("Elapsed/per datapoint: %s / %s", elapsed,
                          elapsed / len(res))
        # self.assertLess(elapsed, len(res))  # less than 1 datapoint per sec is a no-go
        exp = [
            0.53060066889723, 0.39251356581014, 0.388405157629,
            0.38927586980868, 0.30511697736531, 0.21160424043633,
            0.07339064994943
        ]
        self.assertEqual(exp, [
            round(x[DataPoint.CURRENT][''][KPISet.STDEV_RESP_TIME], 14)
            for x in res
        ])

    def test_kpiset_trapped_getitem(self):
        def new():
            subj = KPISet(perc_levels=(100.0, ))
            subj[KPISet.RESP_TIMES].add(0.1)
            subj[KPISet.RESP_TIMES].add(0.01)
            subj[KPISet.RESP_TIMES].add(0.001)
            subj.recalculate()
            return subj

        def enc_dec_iter(vals):
            vals = list(vals)
            dct = {x[0]: x[1] for x in vals}
            jsoned = to_json(dct)
            return json.loads(jsoned)

        exp = {
            u'avg_ct': 0,
            u'avg_lt': 0,
            u'avg_rt': 0,
            u'bytes': 0,
            u'concurrency': 0,
            u'errors': [],
            u'fail': 0,
            u'perc': {
                u'100.0': 0.1
            },
            u'rc': {},
            u'rt': {
                u'0.001': 1,
                u'0.01': 1,
                u'0.1': 1
            },
            u'stdev_rt': 0.05802585630561603,
            u'succ': 0,
            u'throughput': 0
        }

        self.assertEqual(exp, enc_dec_iter(new().items()))
        self.assertEqual('{"100.0": 0.1}',
                         to_json(new().get(KPISet.PERCENTILES), indent=None))
Example #23
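A standalone transaction-controller test from an older tree that resolves the fixture path relative to __dir__().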
    def test_tranctl_jtl(self):
        obj = JTLReader(__dir__() + "/../data/tranctl.jtl", logging.getLogger(''), None)
        values = [x for x in obj.datapoints(True)]
        self.assertEquals(1, len(values))
Example #24
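Another snapshot of the same test class that keeps the Python 2 branches and assigns perc_levels to the KPISet after construction.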
class TestJTLReader(BZTestCase):
    def setUp(self):
        super(TestJTLReader, self).setUp()
        self.obj = None

    def configure(self, jtl_file):
        self.obj = JTLReader(jtl_file, ROOT_LOGGER)

    def tearDown(self):
        if self.obj:
            close_reader_file(self.obj.csvreader)
            close_reader_file(self.obj.errors_reader)
        super(TestJTLReader, self).tearDown()

    def test_tranctl_jtl(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/tranctl.jtl")
        values = [x for x in self.obj.datapoints(final_pass=True)]
        self.assertEquals(1, len(values))

    def test_tabs_jtl(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/tabs.jtl")
        values = [x for x in self.obj.datapoints(final_pass=True)]
        self.assertEquals(4, len(values))

    def test_reader_unicode(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/unicode.jtl")
        self.obj.ignored_labels = [u"Тест.Эхо"]
        for point in self.obj.datapoints(final_pass=True):
            cumulative = point[DataPoint.CUMULATIVE]
            self.assertIn(u"САП.АутентифицироватьРасш", cumulative)
            self.assertNotIn(u"Тест.Эхо", cumulative)

    def test_jtl_doublequoting(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/doublequoting.jtl")
        list(self.obj.datapoints(final_pass=True))

    def test_jtl_csv_sniffer_unicode_crash(self):
        self.configure(RESOURCES_DIR + "/jmeter/jtl/quote-guessing-crash.jtl")
        list(self.obj.datapoints(final_pass=True))

    def test_stdev_performance(self):
        start = time.time()
        self.configure(RESOURCES_DIR + "/jmeter/jtl/slow-stdev.jtl")
        res = list(self.obj.datapoints(final_pass=True))
        lst_json = to_json(res)

        self.assertNotIn('"perc": {},', lst_json)

        elapsed = time.time() - start
        ROOT_LOGGER.debug("Elapsed/per datapoint: %s / %s", elapsed,
                          elapsed / len(res))
        # self.assertLess(elapsed, len(res))  # less than 1 datapoint per sec is a no-go
        exp = [
            2.2144798867972773, 0.7207704268609725, 0.606834452578833,
            0.8284089170237546, 0.5858142211763572, 0.622922628329711,
            0.5529488620851849, 0.6933748292117727, 0.4876162181858197,
            0.42471180222446503, 0.2512251128133865
        ]
        self.assertEqual(
            exp,
            [x[DataPoint.CURRENT][''][KPISet.STDEV_RESP_TIME] for x in res])

    def test_kpiset_trapped_getitem(self):
        def new():
            subj = KPISet()
            subj.perc_levels = (100.0, )
            subj[KPISet.RESP_TIMES].add(0.1)
            subj[KPISet.RESP_TIMES].add(0.01)
            subj[KPISet.RESP_TIMES].add(0.001)
            subj.recalculate()
            return subj

        def enc_dec_iter(vals):
            vals = list(vals)
            dct = {x[0]: x[1] for x in vals}
            jsoned = to_json(dct)
            return json.loads(jsoned)

        exp = {
            u'avg_ct': 0,
            u'avg_lt': 0,
            u'avg_rt': 0,
            u'bytes': 0,
            u'concurrency': 0,
            u'errors': [],
            u'fail': 0,
            u'perc': {
                u'100.0': 0.1
            },
            u'rc': {},
            u'rt': {
                u'0.001': 1,
                u'0.01': 1,
                u'0.1': 1
            },
            u'stdev_rt': 0.058 if PY2 else 0.05802585630561603,
            u'succ': 0,
            u'throughput': 0
        }

        self.assertEqual(exp, enc_dec_iter(new().items()))
        if PY2:
            self.assertEqual(exp, enc_dec_iter(new().viewitems()))
            self.assertEqual(exp, enc_dec_iter(new().iteritems()))
        self.assertEqual('{"100.0": 0.1}',
                         to_json(new().get(KPISet.PERCENTILES), indent=None))
Example #25
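A factory method returning a JTLReader with no errors file.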
    def create_load_reader(self, report_file):
        return JTLReader(report_file, self.log, None)
Example #26
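A standalone unicode test that ignores a label via the u() text helper and asserts it is absent from the cumulative data.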
    def test_reader_unicode(self):
        reader = JTLReader(RESOURCES_DIR + "/jmeter/jtl/unicode.jtl", logging.getLogger(''), None)
        reader.ignored_labels = [u("Тест.Эхо")]
        for point in reader.datapoints():
            cumulative = point[DataPoint.CUMULATIVE]
            self.assertNotIn("Тест.Эхо", cumulative)
Example #27
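A standalone double-quoting test.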
    def test_jtl_doublequoting(self):
        obj = JTLReader(RESOURCES_DIR + "/jmeter/jtl/doublequoting.jtl", logging.getLogger(), None)
        list(obj.datapoints(True))
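
Across the examples above, JTLReader is constructed either as JTLReader(filename, logger) or as JTLReader(filename, logger, errors_filename), with None passed when no error XML is collected, and it is consumed by iterating datapoints() (final_pass=True flushes whatever is still buffered at end of file). Below is a minimal standalone sketch of that pattern; the kpi.jtl path is hypothetical, and the import assumes JTLReader lives in bzt.modules.jmeter as in the Taurus versions these snippets come from (adjust for your release):

import logging

from bzt.modules.jmeter import JTLReader

# Hypothetical results file: any JMeter CSV-style JTL whose header carries
# "timeStamp" and "elapsed" columns, which is what the sniffers above look for.
reader = JTLReader("kpi.jtl", logging.getLogger(''), None)

# datapoints() is a generator of DataPoint dicts; final_pass=True makes the
# reader emit the samples still buffered once the file is exhausted.
for point in reader.datapoints(final_pass=True):
    print(point)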