Code Example #1
    def test_locust_master(self):
        self.configure({"execution": {
            "concurrency": 1,
            "iterations": 10,
            "hold-for": 30,
            "master": True,
            "slaves": 1,
            "scenario": {
                "default-address": "http://blazedemo.com",
                "script": RESOURCES_DIR + "locust/simple.py"
            }
        }})

        self.obj.prepare()
        self.obj.startup()
        self.obj.get_widget()
        try:
            self.obj.check()
            time.sleep(2)
            self.obj.check()
        except RuntimeError:
            ROOT_LOGGER.warning("Do you use patched locust for non-GUI master?")
        self.obj.shutdown()
        self.obj.post_process()
        self.assertFalse(self.obj.has_results())
Code Example #2
File: cases.py Project: mryan43/taurus
    def assertFilesEqual(expected,
                         actual,
                         replace_str="",
                         replace_with="",
                         python_files=False):
        with open(expected) as exp, open(actual) as act:
            act_lines = [
                x.replace(replace_str, replace_with).rstrip()
                for x in act.readlines()
            ]
            exp_lines = [
                x.replace(replace_str, replace_with).rstrip()
                for x in exp.readlines()
            ]
            if python_files:
                act_lines = astunparse.unparse(ast.parse(
                    '\n'.join(act_lines))).split('\n')
                exp_lines = astunparse.unparse(ast.parse(
                    '\n'.join(exp_lines))).split('\n')

            diff = list(difflib.unified_diff(exp_lines, act_lines))
            if diff:
                ROOT_LOGGER.info("Replacements are: %s => %s", replace_str,
                                 replace_with)
                msg = "Failed asserting that two files are equal:\n%s\nversus\n%s\nDiff is:\n\n%s"
                raise AssertionError(msg % (actual, expected, "\n".join(diff)))
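
A side note on the mechanics: difflib.unified_diff, which this helper builds on, yields standard unified-diff lines and yields nothing at all when the inputs match. A minimal standalone sketch with made-up line lists:

import difflib

# Two hypothetical line lists, as produced by readlines() after rstrip().
expected = ["alpha", "beta", "gamma"]
actual = ["alpha", "BETA", "gamma"]

# unified_diff is lazy; an empty result means the two files matched.
diff = list(difflib.unified_diff(expected, actual, lineterm=""))
print("\n".join(diff))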
Code Example #3
File: test_configuration.py Project: xmeng1/taurus
    def test_merge(self):
        obj = Configuration()
        configs = [
            RESOURCES_DIR + "yaml/test.yml",
            RESOURCES_DIR + "json/merge1.json",
            RESOURCES_DIR + "json/merge2.json",
        ]
        obj.load(configs)
        fname = tempfile.mkstemp()[1]
        obj.dump(fname, Configuration.JSON)
        with open(fname) as fh:
            ROOT_LOGGER.debug("JSON:\n%s", fh.read())
        jmeter = obj['modules']['jmeter']
        classval = jmeter['class']
        self.assertEquals("bzt.modules.jmeter.JMeterExecutor", classval)
        self.assertEquals("value", obj['key'])
        self.assertEquals(6, len(obj["list-append"]))
        self.assertEquals(2, len(obj["list-replace"]))
        self.assertEquals(2, len(obj["list-replace-notexistent"]))
        self.assertIsInstance(obj["list-complex"][1][0], BetterDict)
        self.assertIsInstance(obj["list-complex"][1][0], BetterDict)
        self.assertIsInstance(obj["list-complex"][1][0], BetterDict)
        self.assertFalse("properties" in jmeter)

        fname = tempfile.mkstemp()[1]
        obj.dump(fname, Configuration.JSON)
        checker = Configuration()
        checker.load([fname])
        token = checker["list-complex"][1][0]['token']
        self.assertNotEquals('test', token)
        token_orig = obj["list-complex"][1][0]['token']
        self.assertEquals('test', token_orig)
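
One detail worth flagging in this test: tempfile.mkstemp()[1] discards the open OS-level file descriptor that mkstemp returns, so the descriptor stays open for the life of the process. A sketch of the more careful pattern:

import os
import tempfile

# mkstemp returns (fd, path); close the descriptor explicitly
# if you only need the path.
fd, fname = tempfile.mkstemp()
os.close(fd)
# ... write to fname via a normal open() call, then clean up ...
os.remove(fname)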
Code Example #4
    def test_locust_master(self):
        self.configure({
            "execution": {
                "concurrency": 1,
                "iterations": 10,
                "hold-for": 30,
                "master": True,
                "slaves": 1,
                "scenario": {
                    "default-address": "http://blazedemo.com",
                    "script": RESOURCES_DIR + "locust/simple.py"
                }
            }
        })

        self.obj.prepare()
        self.obj.startup()
        self.obj.get_widget()
        try:
            self.obj.check()
            time.sleep(2)
            self.obj.check()
        except RuntimeError:
            ROOT_LOGGER.warning(
                "Do you use patched locust for non-GUI master?")
        self.obj.shutdown()
        self.obj.post_process()
        self.assertFalse(self.obj.has_results())
Code Example #5
File: test_java.py Project: yurifrl/taurus
 def test_not_junit(self):
     """
     Check that JUnit runner fails if no tests were found
     :return:
     """
     self.configure({
         EXEC: {
             "executor": "selenium",
             "scenario": {
                 "script":
                 RESOURCES_DIR + "selenium/invalid/NotJUnittest.java"
             }
         }
     })
     self.obj.prepare()
     self.assertIsInstance(self.obj.runner, JUnitTester)
     self.obj.startup()
     try:
         while not self.obj.check():
             time.sleep(self.obj.engine.check_interval)
         self.fail()
     except ToolError as exc:
         diagnostics = "\n".join(exc.diagnostics)
         self.assertIn("Nothing to test", diagnostics)
     except BaseException as exc:
         ROOT_LOGGER.debug(traceback.format_exc())
         self.fail("Unexpected exception %s, expected ToolError" % exc)
     self.obj.shutdown()
Code Example #6
File: test_aggregator.py Project: andy7i/taurus
    def test_speed(self):
        obj = self.obj

        mock = MockReader()
        mock.buffer_scale_idx = '100.0'
        obj.add_listener(mock)

        res = {}
        # current measurements show ~25K samples/sec
        for cnt in (10, 100, 1000, 10000, 25000, 40000, 50000):
            for a in range(0, cnt):
                sample = (cnt, "", 1, r(1000), r(1000), r(1000), rc(), err(), '', 0)
                mock.data.append(sample)
            before = time.time()
            for point in mock.datapoints():
                pass
            after = time.time()
            res[cnt] = after - before
            ROOT_LOGGER.info("Times: %s", res)

            while mock.results:
                point = mock.results.pop(0)
                overall = point[DataPoint.CURRENT]['']
                self.assertTrue(len(overall[KPISet.PERCENTILES]) > 0)

        for point in mock.datapoints(True):
            pass
Code Example #7
File: test_pbench.py Project: andy7i/taurus
 def test_schedule_empty(self):
     # concurrency: 1, iterations: 1
     scheduler = self.get_scheduler(b("4 test\ntest\n"))
     items = list(scheduler.generate())
     for item in items:
         ROOT_LOGGER.debug("Item: %s", item)
     self.assertEqual(1, len(items))
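
The b(...) helper is not defined in these snippets; in the Taurus test suite it presumably converts a text literal to bytes (in the manner of six.b). A minimal stand-in under that assumption:

def b(s):
    # Python 3 equivalent of six.b: encode a text literal to bytes.
    return s.encode("latin-1")

payload = b("4 test\ntest\n")
assert isinstance(payload, bytes)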
Code Example #8
File: test_pbench.py Project: yurifrl/taurus
    def test_schedule_rps(self):
        rps = 9
        rampup = 12
        self.configure({
            "provisioning": "test",
            EXEC: {
                "throughput": rps,
                "ramp-up": rampup,
                "steps": 3,
                "hold-for": 0
            }
        })
        scheduler = self.get_scheduler(b("4 test\ntest\n"))

        cnt = 0
        cur = 0
        currps = 0
        for item in scheduler.generate():
            if int(math.ceil(item[0])) != cur:
                # self.assertLessEqual(currps, rps)
                cur = int(math.ceil(item[0]))
                ROOT_LOGGER.debug("RPS: %s", currps)
                currps = 0

            cnt += 1
            currps += 1

        ROOT_LOGGER.debug("RPS: %s", currps)
Code Example #9
File: test_aggregator.py Project: zeesattarny/taurus
    def test_speed(self):
        obj = self.obj

        mock = MockReader()
        mock.buffer_scale_idx = '100.0'
        obj.add_listener(mock)

        res = {}
        # current measurements show ~25K samples/sec
        for cnt in (10, 100, 1000, 10000, 25000, 40000, 50000):
            for a in range(0, cnt):
                sample = (cnt, "", 1, r(1000), r(1000), r(1000), rc(), err(),
                          '', 0)
                mock.data.append(sample)
            before = time.time()
            for point in mock.datapoints():
                pass
            after = time.time()
            res[cnt] = after - before
            ROOT_LOGGER.info("Times: %s", res)

            while mock.results:
                point = mock.results.pop(0)
                overall = point[DataPoint.CURRENT]['']
                self.assertTrue(len(overall[KPISet.PERCENTILES]) > 0)

        for point in mock.datapoints(True):
            pass
Code Example #10
File: test_pbench.py Project: yurifrl/taurus
 def aggregated_second(self, data):
     current = data[DataPoint.CURRENT]['']
     ROOT_LOGGER.info("DataPoint %s: VU:%s RPS:%s/%s RT:%s",
                      data[DataPoint.TIMESTAMP],
                      current[KPISet.CONCURRENCY],
                      current[KPISet.SAMPLE_COUNT],
                      current[KPISet.FAILURES],
                      current[KPISet.AVG_RESP_TIME])
Code Example #11
File: cases.py Project: yue530tom/taurus
    def assertFilesEqual(expected,
                         actual,
                         replace_str="",
                         replace_with="",
                         python_files=False):
        def order(line):
            line = line.replace(',', ' ,')  # for imports
            line = line.replace('(', '( ')  # for
            line = line.replace(')', ' )')  # calls
            line = line.split(" ")
            line.sort()
            return ' '.join(line)

        def equal_by_content(diff):
            # todo: it doesn't show a diff for the following case; shouldn't we fix it?
            # 01: + func1()
            # 02:   func2()
            # 03: - func1()
            # func1 moved and order has been changed
            act_lines = [line[1:] for line in diff if line.startswith('-')]
            exp_lines = [line[1:] for line in diff if line.startswith('+')]
            for pair in zip(act_lines, exp_lines):
                if order(pair[0]) != order(pair[1]):
                    return False

            return True

        if isinstance(replace_str, str):
            replace_str = [replace_str]
        if isinstance(replace_with, str):
            replace_with = [replace_with]
        with open(expected) as exp, open(actual) as act:
            act_lines = act.readlines()
            exp_lines = exp.readlines()

        subs = dict(zip(replace_str, replace_with))
        subs.update({'<': '< ', '>': ' >'})  # for xml

        for key in subs:
            act_lines = [x.replace(key, subs[key]).rstrip() for x in act_lines]
            exp_lines = [x.replace(key, subs[key]).rstrip() for x in exp_lines]

        if python_files:
            act_lines = astunparse.unparse(ast.parse(
                '\n'.join(act_lines))).split('\n')
            exp_lines = astunparse.unparse(ast.parse(
                '\n'.join(exp_lines))).split('\n')

        diff = list(difflib.unified_diff(exp_lines, act_lines))

        if diff and not equal_by_content(diff[2:]):
            ROOT_LOGGER.info("Replacements are: %s => %s", replace_str,
                             replace_with)
            msg = "Failed asserting that two files are equal:\n%s\nversus\n%s\nDiff is:\n\n%s"
            # here we show the full diff, even the lines that are equal by content
            # todo: show only really different lines

            raise AssertionError(msg % (actual, expected, "\n".join(diff)))
Code Example #12
File: test_pbench.py Project: andy7i/taurus
 def aggregated_second(self, data):
     current = data[DataPoint.CURRENT]['']
     ROOT_LOGGER.info(
         "DataPoint %s: VU:%s RPS:%s/%s RT:%s",
         data[DataPoint.TIMESTAMP],
         current[KPISet.CONCURRENCY],
         current[KPISet.SAMPLE_COUNT],
         current[KPISet.FAILURES],
         current[KPISet.AVG_RESP_TIME])
Code Example #13
File: test_configuration.py Project: xmeng1/taurus
 def test_save(self):
     obj = Configuration()
     obj.merge({"str": "text", "uc": six.u("ucstring")})
     fname = tempfile.mkstemp()[1]
     obj.dump(fname, Configuration.YAML)
     with open(fname) as fh:
         written = fh.read()
         ROOT_LOGGER.debug("YAML:\n%s", written)
         self.assertNotIn("unicode", written)
Code Example #14
 def test_overwrite_execution_locations(self):
     obj = Configuration()
     obj.merge({
         "execution": [{"locations": {"us-central1-a": 1}}],
     })
     obj.merge({
         "$execution": [{"~locations": {"harbor-1": 1}}],
     })
     ROOT_LOGGER.info(obj)
     self.assertEqual(obj, {"execution": [{"locations": {"harbor-1": 1}}]})
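
Judging purely by the assertion, the $-prefixed key merges into the matching list element while the ~ prefix makes the nested value replace the original outright rather than merge with it. A plain-dict restatement of the observed behavior (not Taurus internals):

# Observed inputs and output, restated as plain dicts:
base = {"execution": [{"locations": {"us-central1-a": 1}}]}
overlay = {"$execution": [{"~locations": {"harbor-1": 1}}]}

# After obj.merge(overlay), the ~-marked mapping wins outright;
# nothing of the original "locations" dict survives:
merged = {"execution": [{"locations": {"harbor-1": 1}}]}
assert "us-central1-a" not in merged["execution"][0]["locations"]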
Code Example #15
File: test_monitoring.py Project: yue530tom/taurus
    def test_server_agent(self):
        obj = Monitoring()
        obj.engine = EngineEmul()
        obj.parameters.merge({
            "server-agent": [{
                "address": "127.0.0.1:4444",
                "logging": "True",
                "metrics": [
                    "cpu",
                    "disks"
                ]
            }, {
                "address": "10.0.0.1",
                "metrics": [
                    "something1",
                    "something2"
                ]
            }]
        })

        listener = LoggingMonListener()
        obj.add_listener(listener)

        widget = obj.get_widget()
        obj.add_listener(widget)

        crit_conf = BetterDict.from_dict({"condition": ">", "threshold": 5, "subject": "127.0.0.1:4444/cpu"})
        criteria = MonitoringCriteria(crit_conf, obj)
        obj.add_listener(criteria)

        obj.client_classes = {'server-agent': ServerAgentClientEmul}

        obj.prepare()
        obj.startup()

        for i in range(1, 10):
            obj.clients[0].socket.recv_data += b("%s\t%s\t\n" % (i, i*10))
            obj.check()
            ROOT_LOGGER.debug("Criteria state: %s", criteria)
            time.sleep(obj.engine.check_interval)

        obj.shutdown()
        obj.post_process()

        self.assertEquals(b("test\ninterval:1\nmetrics:cpu\tdisks\nexit\n"), obj.clients[0].socket.sent_data)

        if PY3:
            self.assertIsNotNone(obj.clients[0].logs_file)
            with open(obj.clients[0].logs_file) as serveragent_logs:
                logs_reader = csv.reader(serveragent_logs)
                logs_reader = list(logs_reader)
            self.assertEquals(['ts', 'cpu', 'disks'], logs_reader[0])
            for i in range(1, 10):
                self.assertEquals([str(i), str(i * 10)], logs_reader[i][1:])
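
The log-verification half of this test leans on csv.reader returning one list of strings per row. A tiny self-contained round trip of that idea (log contents made up):

import csv
import io

# Simulate the agent's metrics log: a header row, then one row per reading.
log = io.StringIO("ts,cpu,disks\n1,10,20\n2,30,40\n")
rows = list(csv.reader(log))
assert rows[0] == ["ts", "cpu", "disks"]
assert rows[1][1:] == ["10", "20"]  # csv yields strings, hence str(i) above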
Code Example #16
 def test_save(self):
     obj = Configuration()
     obj.merge({
         "str": "text",
         "uc": six.u("ucstring")
     })
     fname = tempfile.mkstemp()[1]
     obj.dump(fname, Configuration.YAML)
     with open(fname) as fh:
         written = fh.read()
         ROOT_LOGGER.debug("YAML:\n%s", written)
         self.assertNotIn("unicode", written)
Code Example #17
File: test_pbench.py Project: yurifrl/taurus
    def test_simple(self):
        self.configure({
            "provisioning": "test",
            EXEC: {
                "log-responses": "proto_error",
                # "iterations": 5000000,
                "concurrency": 10,
                "throughput": 1000,
                "ramp-up": "1m",
                # "steps": 5,
                "hold-for": "15",
                "scenario": {
                    "timeout":
                    1,
                    "default-address":
                    "http://localhost:33",
                    "headers": {
                        "Connection": "close"
                    },
                    "requests": [{
                        "url": "/api",
                        "method": "POST",
                        "headers": {
                            "Content-Length": 0
                        },
                        "body": {
                            "param": "value"
                        }
                    }]
                }
            }
        })

        self.obj.engine.aggregator = ConsolidatingAggregator()
        self.obj.engine.aggregator.engine = self.obj.engine
        self.obj.engine.aggregator.add_listener(DataPointLogger())

        self.obj.engine.aggregator.prepare()
        self.obj.prepare()

        self.obj.engine.aggregator.startup()
        self.obj.startup()

        while not self.obj.check():
            ROOT_LOGGER.debug("Running...")
            self.obj.engine.aggregator.check()
            time.sleep(0.1)

        self.obj.shutdown()
        self.obj.engine.aggregator.shutdown()

        self.obj.post_process()
        self.obj.engine.aggregator.post_process()
Code Example #18
File: cases.py Project: andy7i/taurus
    def assertFilesEqual(expected, actual, replace_str="", replace_with="", python_files=False):
        with open(expected) as exp, open(actual) as act:
            act_lines = [x.replace(replace_str, replace_with).rstrip() for x in act.readlines()]
            exp_lines = [x.replace(replace_str, replace_with).rstrip() for x in exp.readlines()]
            if python_files:
                act_lines = astunparse.unparse(ast.parse('\n'.join(act_lines))).split('\n')
                exp_lines = astunparse.unparse(ast.parse('\n'.join(exp_lines))).split('\n')

            diff = list(difflib.unified_diff(exp_lines, act_lines))
            if diff:
                ROOT_LOGGER.info("Replacements are: %s => %s", replace_str, replace_with)
                msg = "Failed asserting that two files are equal:\n%s\nversus\n%s\nDiff is:\n\n%s"
                raise AssertionError(msg % (actual, expected, "\n".join(diff)))
Code Example #19
File: cases.py Project: vprashanth777/taurus
 def tearDown(self):
     exc, _, _ = sys.exc_info()
     if exc:
         try:
             if hasattr(self, 'obj') and isinstance(self.obj, SelfDiagnosable):
                 diags = self.obj.get_error_diagnostics()
                 if diags:
                     for line in diags:
                         ROOT_LOGGER.info(line)
         except BaseException:
             pass
     if self.captured_logger:
         self.captured_logger.removeHandler(self.log_recorder)
         self.log_recorder.close()
Code Example #20
File: cases.py Project: andy7i/taurus
 def tearDown(self):
     exc, _, _ = sys.exc_info()
     if exc:
         try:
             if hasattr(self, 'obj') and isinstance(self.obj, SelfDiagnosable):
                 diags = self.obj.get_error_diagnostics()
                 if diags:
                     for line in diags:
                         ROOT_LOGGER.info(line)
         except BaseException:
             pass
     if self.captured_logger:
         self.captured_logger.removeHandler(self.log_recorder)
         self.log_recorder.close()
Code Example #21
File: test_pbench.py Project: andy7i/taurus
    def test_simple(self):
        self.configure({
            "provisioning": "test",
            ScenarioExecutor.EXEC: {
                "log-responses": "proto_error",
                # "iterations": 5000000,
                "concurrency": 10,
                "throughput": 1000,
                "ramp-up": "1m",
                # "steps": 5,
                "hold-for": "15",
                "scenario": {
                    "timeout": 1,
                    "default-address": "http://localhost:33",
                    "headers": {
                        "Connection": "close"
                    },
                    "requests": [
                        {
                            "url": "/api",
                            "method": "POST",
                            "headers": {
                                "Content-Length": 0
                            },
                            "body": {
                                "param": "value"}}]}}})

        self.obj.engine.aggregator = ConsolidatingAggregator()
        self.obj.engine.aggregator.engine = self.obj.engine
        self.obj.engine.aggregator.add_listener(DataPointLogger())

        self.obj.engine.aggregator.prepare()
        self.obj.prepare()

        self.obj.engine.aggregator.startup()
        self.obj.startup()

        while not self.obj.check():
            ROOT_LOGGER.debug("Running...")
            self.obj.engine.aggregator.check()
            time.sleep(0.1)

        self.obj.shutdown()
        self.obj.engine.aggregator.shutdown()

        self.obj.post_process()
        self.obj.engine.aggregator.post_process()
Code Example #22
    def assertFilesEqual(expected,
                         actual,
                         replace_str="",
                         replace_with="",
                         python_files=False):
        def equal_by_content(difference):
            diff_act, diff_exp = [], []
            for line in difference:
                if line[0] == '-':
                    act_line = line[2:line.rfind('"') + 1].split(" ")[1:]
                    act_line.sort()
                    diff_act.append(act_line)
                elif line[0] == '+':
                    diff_exp.append(line[2:line.rfind('"') + 1].split(" ")[1:])
            if diff_act == diff_exp:
                return True
            else:
                return False

        if isinstance(replace_str, str):
            replace_str = [replace_str]
        if isinstance(replace_with, str):
            replace_with = [replace_with]
        with open(expected) as exp, open(actual) as act:
            act_lines = act.readlines()
            exp_lines = exp.readlines()

        subs = dict(zip(replace_str, replace_with))
        for key in subs:
            act_lines = [x.replace(key, subs[key]).rstrip() for x in act_lines]
            exp_lines = [x.replace(key, subs[key]).rstrip() for x in exp_lines]

        if python_files:
            act_lines = astunparse.unparse(ast.parse(
                '\n'.join(act_lines))).split('\n')
            exp_lines = astunparse.unparse(ast.parse(
                '\n'.join(exp_lines))).split('\n')

        diff = list(difflib.unified_diff(exp_lines, act_lines))

        if diff and not equal_by_content(diff[5:]):
            ROOT_LOGGER.info("Replacements are: %s => %s", replace_str,
                             replace_with)
            msg = "Failed asserting that two files are equal:\n%s\nversus\n%s\nDiff is:\n\n%s"
            raise AssertionError(msg % (actual, expected, "\n".join(diff)))
Code Example #23
    def test_binary_unicode_error(self):
        fd, fname = mkstemp()
        os.close(fd)
        file_handler = logging.FileHandler(fname, encoding="utf-8")
        file_handler.setLevel(logging.DEBUG)
        ROOT_LOGGER.addHandler(file_handler)

        try:
            session = Session(data={'id': 1})
            mock = BZMock(session)
            mock.mock_post['https://data.blazemeter.com/api/v4/image/1/files?signature=None'] = {"result": 1}
            with open(RESOURCES_DIR + "jmeter/jmeter-dist-2.13.zip", 'rb') as fds:
                zip_content = fds.read()
            session.upload_file("jtls_and_more.zip", zip_content)
        finally:
            ROOT_LOGGER.removeHandler(file_handler)
            file_handler.close()
            os.remove(fname)
Code Example #24
    def test_server_agent(self):
        obj = Monitoring()
        obj.engine = EngineEmul()
        obj.parameters.merge({
            "server-agent": [{
                "address": "127.0.0.1:4444",
                "metrics": ["cpu", "disks"]
            }, {
                "address": "10.0.0.1",
                "metrics": ["something1", "something2"]
            }]
        })

        listener = LoggingMonListener()
        obj.add_listener(listener)

        widget = obj.get_widget()
        obj.add_listener(widget)

        crit_conf = BetterDict.from_dict({
            "condition": ">",
            "threshold": 5,
            "subject": "127.0.0.1:4444/cpu"
        })
        criteria = MonitoringCriteria(crit_conf, obj)
        obj.add_listener(criteria)

        obj.client_classes = {'server-agent': ServerAgentClientEmul}

        obj.prepare()
        obj.startup()

        for _ in range(1, 10):
            obj.clients[0].socket.recv_data += b(
                "%s\t%s\n" % (random.random(), random.random()))
            obj.check()
            ROOT_LOGGER.debug("Criteria state: %s", criteria)
            time.sleep(obj.engine.check_interval)

        obj.shutdown()
        obj.post_process()

        self.assertEquals(b("test\ninterval:1\nmetrics:cpu\tdisks\nexit\n"),
                          obj.clients[0].socket.sent_data)
Code Example #25
File: test_passFailStatus.py Project: andy7i/taurus
    def test_prepare(self):
        config = json.loads(open(RESOURCES_DIR + "json/passfail.json").read())
        self.configure(config['reporting'][0])
        self.obj.prepare()
        self.assertGreater(len(self.obj.criteria), 0)

        for n in range(0, 10):
            point = random_datapoint(n)
            ROOT_LOGGER.info("%s: %s", n, point)
            self.obj.aggregated_second(point)
            try:
                self.obj.check()
            except AutomatedShutdown:
                pass

        try:
            self.obj.post_process()
        except AutomatedShutdown:
            pass
Code Example #26
File: test_pbench.py Project: infomaven/taurus
    def test_schedule_rps(self):
        self.obj.engine.config.merge({"provisioning": "test"})
        rps = 9
        rampup = 12
        self.obj.execution.merge({"throughput": rps, "ramp-up": rampup, "steps": 3, "hold-for": 0})
        scheduler = self.get_scheduler(b("4 test\ntest\n"))

        cnt = 0
        cur = 0
        currps = 0
        for item in scheduler.generate():
            if int(math.ceil(item[0])) != cur:
                # self.assertLessEqual(currps, rps)
                cur = int(math.ceil(item[0]))
                ROOT_LOGGER.debug("RPS: %s", currps)
                currps = 0

            cnt += 1
            currps += 1

        ROOT_LOGGER.debug("RPS: %s", currps)
Code Example #27
    def test_stdev_performance(self):
        start = time.time()
        self.configure(RESOURCES_DIR + "jmeter/jtl/slow-stdev.jtl")
        res = list(self.obj.datapoints(final_pass=True))
        lst_json = to_json(res)

        self.assertNotIn('"perc": {},', lst_json)

        elapsed = time.time() - start
        ROOT_LOGGER.debug("Elapsed/per datapoint: %s / %s", elapsed,
                          elapsed / len(res))
        # self.assertLess(elapsed, len(res))  # less than 1 datapoint per sec is a no-go
        exp = [
            0.53060066889723, 0.39251356581014, 0.388405157629,
            0.38927586980868, 0.30511697736531, 0.21160424043633,
            0.07339064994943
        ]
        self.assertEqual(exp, [
            round(x[DataPoint.CURRENT][''][KPISet.STDEV_RESP_TIME], 14)
            for x in res
        ])
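
Rounding to 14 decimal places before comparing, as above, absorbs platform-dependent noise in the last bits of a float. A small stdlib illustration:

import math

# Classic trailing-bit noise in binary floats:
assert 0.1 + 0.2 != 0.3
# Rounding to a fixed number of places absorbs the noise, as the test does...
assert round(0.1 + 0.2, 14) == round(0.3, 14)
# ...and math.isclose is the more idiomatic modern alternative.
assert math.isclose(0.1 + 0.2, 0.3)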
Code Example #28
    def test_stdev_performance(self):
        start = time.time()
        self.configure(RESOURCES_DIR + "/jmeter/jtl/slow-stdev.jtl")
        res = list(self.obj.datapoints(final_pass=True))
        lst_json = to_json(res)

        self.assertNotIn('"perc": {},', lst_json)

        elapsed = time.time() - start
        ROOT_LOGGER.debug("Elapsed/per datapoint: %s / %s", elapsed,
                          elapsed / len(res))
        # self.assertLess(elapsed, len(res))  # less than 1 datapoint per sec is a no-go
        exp = [
            2.2144798867972773, 0.7207704268609725, 0.606834452578833,
            0.8284089170237546, 0.5858142211763572, 0.622922628329711,
            0.5529488620851849, 0.6933748292117727, 0.4876162181858197,
            0.42471180222446503, 0.2512251128133865
        ]
        self.assertEqual(
            exp,
            [x[DataPoint.CURRENT][''][KPISet.STDEV_RESP_TIME] for x in res])
Code Example #29
    def test_public_report(self):
        mock = BZMock()
        mock.mock_get.update({
            'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {"result": []}
        })

        mock.mock_post.update({
            'https://a.blazemeter.com/api/v4/projects': {"result": {'id': 1}},
            'https://a.blazemeter.com/api/v4/tests': {'result': {'id': 'unittest1'}},
            'https://a.blazemeter.com/api/v4/tests/unittest1/start-external': {"result": {
                'session': {'id': 'sess1', 'userId': 1, 'testId': 1},
                'master': {'id': 'master1', 'userId': 1},
                'signature': ''
            }},
            'https://a.blazemeter.com/api/v4/masters/master1/public-token': {'result': {'publicToken': 'publicToken'}},
            'https://data.blazemeter.com/submit.php?session_id=sess1&signature=&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {
                "result": {'session': {}}},
            'https://data.blazemeter.com/api/v4/image/sess1/files?signature=': {'result': True},
        })

        obj = BlazeMeterUploader()
        obj.settings['token'] = '123'
        obj.settings['browser-open'] = 'none'
        obj.settings['public-report'] = True
        obj.settings['send-monitoring'] = False
        obj.engine = EngineEmul()
        mock.apply(obj._user)
        self.sniff_log(obj.log)
        obj.prepare()
        obj.startup()
        obj.aggregated_second(random_datapoint(10))
        obj.check()
        obj.shutdown()
        obj.post_process()

        log_buff = self.log_recorder.info_buff.getvalue()
        log_line = "Public report link: https://a.blazemeter.com/app/?public-token=publicToken#/masters/master1/summary"
        self.assertIn(log_line, log_buff)
        ROOT_LOGGER.warning("\n".join([x['url'] for x in mock.requests]))
        self.assertEqual(14, len(mock.requests))
Code Example #30
File: test_JTLReader.py Project: shukhov88/taurus
    def test_stdev_performance(self):
        start = time.time()
        self.configure(RESOURCES_DIR + "jmeter/jtl/slow-stdev.jtl")
        res = list(self.obj.datapoints(final_pass=True))
        lst_json = to_json(res)

        self.assertNotIn('"perc": {},', lst_json)

        elapsed = time.time() - start
        ROOT_LOGGER.debug("Elapsed/per datapoint: %s / %s", elapsed,
                          elapsed / len(res))
        # self.assertLess(elapsed, len(res))  # less than 1 datapoint per sec is a no-go
        exp = [
            0.53060066889723, 0.39251356581014, 0.388405157629,
            0.52855748890714, 0.39107758224016, 0.38999119030886,
            0.32537625773864, 0.47057465198195, 0.2746790136753,
            0.23251104555698, 0.08369447671202
        ]
        self.assertEqual(exp, [
            round(x[DataPoint.CURRENT][''][KPISet.STDEV_RESP_TIME], 14)
            for x in res
        ])
Code Example #31
    def assertFilesEqual(self,
                         expected,
                         actual,
                         replace_str="",
                         replace_with=""):
        # import shutil; shutil.copy(actual, expected)

        with open(expected) as exp, open(actual) as act:
            act_lines = [
                x.replace(replace_str, replace_with).rstrip()
                for x in act.readlines()
            ]
            exp_lines = [
                x.replace(replace_str, replace_with).rstrip()
                for x in exp.readlines()
            ]
            diff = list(difflib.unified_diff(exp_lines, act_lines))
            if diff:
                ROOT_LOGGER.info("Replacements are: %s => %s", replace_str,
                                 replace_with)
                msg = "Failed asserting that two files are equal:\n" + actual + "\nversus\n" + expected + "\nDiff is:\n"
                raise AssertionError(msg + "\n".join(diff))
Code Example #32
File: test_JTLReader.py Project: andy7i/taurus
    def test_stdev_performance(self):
        start = time.time()
        self.configure(RESOURCES_DIR + "/jmeter/jtl/slow-stdev.jtl")
        res = list(self.obj.datapoints(final_pass=True))
        lst_json = to_json(res)

        self.assertNotIn('"perc": {},', lst_json)

        elapsed = time.time() - start
        ROOT_LOGGER.debug("Elapsed/per datapoint: %s / %s", elapsed, elapsed / len(res))
        # self.assertLess(elapsed, len(res))  # less than 1 datapoint per sec is a no-go
        exp = [2.2144798867972773,
               0.7207704268609725,
               0.606834452578833,
               0.8284089170237546,
               0.5858142211763572,
               0.622922628329711,
               0.5529488620851849,
               0.6933748292117727,
               0.4876162181858197,
               0.42471180222446503,
               0.2512251128133865]
        self.assertEqual(exp, [x[DataPoint.CURRENT][''][KPISet.STDEV_RESP_TIME] for x in res])
Code Example #33
File: test_java.py Project: andy7i/taurus
 def test_not_junit(self):
     """
     Check that JUnit runner fails if no tests were found
     :return:
     """
     self.configure({
         ScenarioExecutor.EXEC: {
             "executor": "selenium",
             "scenario": {"script": RESOURCES_DIR + "selenium/invalid/NotJUnittest.java"}}})
     self.obj.prepare()
     self.assertIsInstance(self.obj.runner, JUnitTester)
     self.obj.startup()
     try:
         while not self.obj.check():
             time.sleep(self.obj.engine.check_interval)
         self.fail()
     except ToolError as exc:
         diagnostics = "\n".join(exc.diagnostics)
         self.assertIn("Nothing to test", diagnostics)
     except BaseException as exc:
         ROOT_LOGGER.debug(traceback.format_exc())
         self.fail("Unexpected exception %s, expected ToolError" % exc)
     self.obj.shutdown()
Code Example #34
File: test_configuration.py Project: xmeng1/taurus
    def test_load(self):
        obj = Configuration()
        configs = [
            BASE_CONFIG, RESOURCES_DIR + "json/jmx.json",
            RESOURCES_DIR + "json/concurrency.json"
        ]
        obj.load(configs)
        ROOT_LOGGER.debug("config:\n%s", obj)

        fname = tempfile.mkstemp()[1]
        obj.dump(fname, Configuration.JSON)
        with open(fname) as fh:
            ROOT_LOGGER.debug("JSON:\n%s", fh.read())

        fname = tempfile.mkstemp()[1]
        obj.dump(fname, Configuration.YAML)
        with open(fname) as fh:
            ROOT_LOGGER.debug("YAML:\n%s", fh.read())
Code Example #35
    def test_load(self):
        obj = Configuration()
        configs = [
            BASE_CONFIG,
            RESOURCES_DIR + "json/jmx.json",
            RESOURCES_DIR + "json/concurrency.json"
        ]
        obj.load(configs)
        ROOT_LOGGER.debug("config:\n%s", obj)

        fname = tempfile.mkstemp()[1]
        obj.dump(fname, Configuration.JSON)
        with open(fname) as fh:
            ROOT_LOGGER.debug("JSON:\n%s", fh.read())

        fname = tempfile.mkstemp()[1]
        obj.dump(fname, Configuration.YAML)
        with open(fname) as fh:
            ROOT_LOGGER.debug("YAML:\n%s", fh.read())
Code Example #36
File: test_pbench.py Project: zuozewei/taurus
 def check_schedule_size_estimate(self, execution):
     self.configure({
         ScenarioExecutor.EXEC: execution,
         "provisioning": "local",
     })
     load = self.obj.get_load()
     self.obj.generator = TaurusPBenchGenerator(self.obj, ROOT_LOGGER)
     self.obj.generator.generate_payload(self.obj.get_scenario())
     payload_count = len(self.obj.get_scenario().get('requests', []))
     sch = Scheduler(load, self.obj.generator.payload_file, ROOT_LOGGER)
     estimated_schedule_size = self.obj.generator._estimate_schedule_size(load, payload_count)
     ROOT_LOGGER.debug("Estimated schedule size: %s", estimated_schedule_size)
     items = list(sch.generate())
     actual_schedule_size = len(items)
     ROOT_LOGGER.debug("Actual schedule size: %s", actual_schedule_size)
     if actual_schedule_size != 0:
         error = abs(estimated_schedule_size - actual_schedule_size)
         error_rel = error / float(actual_schedule_size)
         ROOT_LOGGER.debug("Estimation error: %s", error)
         if error_rel >= 0.1:
             self.fail("Estimation failed (error=%s) on config %s" % (error_rel, pprint.pformat(execution)))
Code Example #37
File: test_pbench.py Project: andy7i/taurus
 def check_schedule_size_estimate(self, execution):
     self.configure({
         ScenarioExecutor.EXEC: execution,
         "provisioning": "local",
     })
     load = self.obj.get_load()
     self.obj.generator = TaurusPBenchGenerator(self.obj, ROOT_LOGGER)
     self.obj.generator.generate_payload(self.obj.get_scenario())
     payload_count = len(self.obj.get_scenario().get('requests', []))
     sch = Scheduler(load, self.obj.generator.payload_file, ROOT_LOGGER)
     estimated_schedule_size = self.obj.generator._estimate_schedule_size(load, payload_count)
     ROOT_LOGGER.debug("Estimated schedule size: %s", estimated_schedule_size)
     items = list(sch.generate())
     actual_schedule_size = len(items)
     ROOT_LOGGER.debug("Actual schedule size: %s", actual_schedule_size)
     if actual_schedule_size != 0:
         error = abs(estimated_schedule_size - actual_schedule_size)
         error_rel = error / float(actual_schedule_size)
         ROOT_LOGGER.debug("Estimation error: %s", error)
         if error_rel >= 0.1:
             self.fail("Estimation failed (error=%s) on config %s" % (error_rel, pprint.pformat(execution)))
Code Example #38
    def test_xml_format_passfail(self):
        obj = JUnitXMLReporter()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict()
        obj.engine.provisioning = CloudProvisioning()
        obj.engine.provisioning.results_url = "http://test/report/123"

        pass_fail1 = CriteriaProcessor([], None)

        crit_cfg1 = BetterDict()
        crit_cfg2 = BetterDict()
        crit_cfg3 = BetterDict()
        crit_cfg4 = BetterDict()

        crit_cfg1.merge({
            'stop': True, 'label': 'Sample 1 Triggered', 'fail': True,
            'timeframe': -1, 'threshold': '150ms', 'condition': '<', 'subject': 'avg-rt'})

        crit_cfg2.merge({
            'stop': True, 'label': 'Sample 1 Not Triggered', 'fail': True,
            'timeframe': -1, 'threshold': '300ms', 'condition': '>', 'subject': 'avg-rt'})

        crit_cfg3.merge({
            'stop': True, 'label': 'Sample 2 Triggered', 'fail': True, 'timeframe': -1,
            'threshold': '150ms', 'condition': '<=', 'subject': 'avg-rt'})

        crit_cfg4.merge({
            'stop': True, 'label': 'Sample 2 Not Triggered', 'fail': True,
            'timeframe': -1, 'threshold': '300ms', 'condition': '=', 'subject': 'avg-rt'})

        fc1_triggered = DataCriterion(crit_cfg1, pass_fail1)
        fc1_not_triggered = DataCriterion(crit_cfg2, pass_fail1)

        pass_fail2 = CriteriaProcessor([], None)

        fc2_triggered = DataCriterion(crit_cfg3, pass_fail1)
        fc2_not_triggered = DataCriterion(crit_cfg4, pass_fail1)

        pass_fail1.criteria.append(fc1_triggered)
        pass_fail1.criteria.append(fc1_not_triggered)
        pass_fail2.criteria.append(fc2_triggered)
        pass_fail2.criteria.append(fc2_not_triggered)

        fc1_triggered.is_triggered = True
        fc2_triggered.is_triggered = True

        pass_fail = PassFailStatus()
        pass_fail.processors.append(pass_fail1)
        pass_fail.processors.append(pass_fail2)
        obj.engine.reporters.append(pass_fail)
        obj.engine.reporters.append(BlazeMeterUploader())

        path_from_config = tempfile.mktemp(suffix='.xml', prefix='junit-xml_passfail', dir=obj.engine.artifacts_dir)

        obj.parameters.merge({"filename": path_from_config, "data-source": "pass-fail"})
        obj.prepare()
        obj.last_second = DataPoint(0)
        obj.post_process()

        with open(obj.report_file_path, 'rb') as fds:
            f_contents = fds.read()

        ROOT_LOGGER.info("File: %s", f_contents)
        xml_tree = etree.fromstring(f_contents)
        self.assertEqual('testsuites', xml_tree.tag)
        suite = xml_tree.getchildren()[0]
        self.assertEqual('testsuite', suite.tag)
        test_cases = suite.getchildren()
        self.assertEqual(4, len(test_cases))
        self.assertEqual('testcase', test_cases[0].tag)
        self.assertEqual('error', test_cases[0].getchildren()[1].tag)
        self.assertEqual('error', test_cases[2].getchildren()[1].tag)

        sys_out = test_cases[0].getchildren()[0]
        self.assertEqual('system-out', sys_out.tag)
        self.assertIn('BlazeMeter report link: http://test/report/123', sys_out.text)
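
The XML assertions in these reporter tests use etree (presumably lxml.etree, where getchildren() is the older spelling of list(element)). An equivalent check using only the standard library:

import xml.etree.ElementTree as ET

report = b"<testsuites><testsuite><testcase/></testsuite></testsuites>"
root = ET.fromstring(report)
assert root.tag == "testsuites"
suite = list(root)[0]  # preferred over the deprecated getchildren()
assert suite.tag == "testsuite"
assert len(list(suite)) == 1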
Code Example #39
    def test_xml_format_sample_labels(self):
        # generate xml, compare hash

        obj = JUnitXMLReporter()
        obj.engine = EngineEmul()
        rep = BlazeMeterUploader()
        rep.results_url = "http://report/123"
        obj.engine.reporters.append(rep)

        path_from_config = tempfile.mktemp(suffix='.xml', prefix='junit-xml-sample-labels',
                                           dir=obj.engine.artifacts_dir)

        # data-source: finalstats by default
        obj.parameters = BetterDict.from_dict({"filename": path_from_config})

        obj.prepare()

        datapoint = DataPoint(0, [])
        cumul_data = datapoint[DataPoint.CUMULATIVE]

        cumul_data[""] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
            KPISet.RESP_TIMES: Counter({
                0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
                0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
                0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
                0.019: 1, 0.015: 1}),
            KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7300, 'type': 0,
                             'urls': Counter({'http://192.168.1.1/anotherquery': 7300}),
                             KPISet.RESP_CODES: '403'},
                            {'msg': 'Assertion failed: text /smth/ not found', 'cnt': 73, 'type': 1,
                             'urls': Counter({'http://192.168.1.1/anotherquery': 73}),
                             KPISet.RESP_CODES: '200'},
                            ],
            KPISet.STDEV_RESP_TIME: 0.04947974228872108,
            KPISet.AVG_LATENCY: 0.0002825639815220692,
            KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
            KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                                 '100.0': 0.081, '99.0': 0.003, '50.0': 0.0},
            KPISet.SUCCESSES: 29658,
            KPISet.SAMPLE_COUNT: 59314,
            KPISet.CONCURRENCY: 0,
            KPISet.AVG_RESP_TIME: 0.0005440536804127192,
            KPISet.FAILURES: 29656})

        cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
            KPISet.RESP_TIMES: Counter({
                0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341,
                0.004: 121,
                0.005: 66, 0.006: 36, 0.007: 33, 0.008: 18,
                0.009: 12, 0.011: 6,
                0.01: 5, 0.013: 2, 0.017: 2, 0.012: 2, 0.079: 1,
                0.016: 1,
                0.014: 1, 0.019: 1, 0.04: 1, 0.081: 1}),
            KPISet.ERRORS: [],
            KPISet.STDEV_RESP_TIME: 0.04073402130687656,
            KPISet.AVG_LATENCY: 1.7196034796682178e-06,
            KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
            KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0,
                                 '99.9': 0.009,
                                 '90.0': 0.001,
                                 '100.0': 0.081,
                                 '99.0': 0.004,
                                 '50.0': 0.0},
            KPISet.SUCCESSES: 29658,
            KPISet.SAMPLE_COUNT: 29658,
            KPISet.CONCURRENCY: 0,
            KPISet.AVG_RESP_TIME: 0.0005164542450603551, KPISet.FAILURES: 0})

        cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME: 6.1707580253574335e-06,
            KPISet.RESP_TIMES: Counter({0.0: 14941, 0.001: 13673, 0.002: 506,
                                        0.003: 289, 0.004: 103,
                                        0.005: 59, 0.006: 37, 0.008: 14,
                                        0.007: 13, 0.009: 8, 0.01: 3,
                                        0.011: 2, 0.016: 2, 0.014: 2,
                                        0.017: 1, 0.013: 1, 0.015: 1,
                                        0.04: 1}),
            KPISet.ERRORS: [
                {'msg': 'Forbidden', 'cnt': 7300, 'type': 0,
                 'urls': Counter({'http://192.168.1.1/anotherquery': 7300}),
                 KPISet.RESP_CODES: '403'},
                {'msg': 'Assertion failed: text /smth/ not found', 'cnt': 73, 'type': 1,
                 'urls': Counter({'http://192.168.1.1/anotherquery': 73}),
                 KPISet.RESP_CODES: '200'},
            ],
            KPISet.STDEV_RESP_TIME: 0.032465137860758844,
            KPISet.AVG_LATENCY: 0.0005634272997032645,
            KPISet.RESP_CODES: Counter({'403': 29656}),
            KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0,
                                 '99.9': 0.008, '90.0': 0.001,
                                 '100.0': 0.04, '99.0': 0.003,
                                 '50.0': 0.0},
            KPISet.SUCCESSES: 0,
            KPISet.SAMPLE_COUNT: 29656,
            KPISet.CONCURRENCY: 0,
            KPISet.AVG_RESP_TIME: 0.0005716549770704078,
            KPISet.FAILURES: 29656})

        cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
            KPISet.RESP_TIMES: Counter({
                0.0: 17219, 0.001: 11246, 0.002: 543,
                0.003: 341, 0.004: 121,
                0.005: 66, 0.006: 36, 0.007: 33, 0.008: 18,
                0.009: 12, 0.011: 6,
                0.01: 5, 0.013: 2, 0.017: 2, 0.012: 2,
                0.079: 1, 0.016: 1,
                0.014: 1, 0.019: 1, 0.04: 1, 0.081: 1}),
            KPISet.ERRORS: [],
            KPISet.STDEV_RESP_TIME: 0.04073402130687656,
            KPISet.AVG_LATENCY: 1.7196034796682178e-06,
            KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
            KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0,
                                 '99.9': 0.009, '90.0': 0.001,
                                 '100.0': 0.081, '99.0': 0.004,
                                 '50.0': 0.0},
            KPISet.SUCCESSES: 29658,
            KPISet.SAMPLE_COUNT: 29658,
            KPISet.CONCURRENCY: 0,
            KPISet.AVG_RESP_TIME: 0.0005164542450603551,
            KPISet.FAILURES: 0})

        obj.aggregated_second(datapoint)

        obj.post_process()

        with open(obj.report_file_path, 'rb') as fds:
            f_contents = fds.read()

        ROOT_LOGGER.info("File: %s", f_contents)
        xml_tree = etree.fromstring(f_contents)
        self.assertEqual('testsuites', xml_tree.tag)
        suite = xml_tree.getchildren()[0]
        self.assertEqual('testsuite', suite.tag)
        self.assertListEqual(['sample_labels', "bzt"], suite.values())
        test_cases = suite.getchildren()
        self.assertEqual(3, len(test_cases))
        self.assertEqual('testcase', test_cases[0].tag)
        self.assertEqual('error', test_cases[0].getchildren()[1].tag)
        self.assertEqual('failure', test_cases[0].getchildren()[2].tag)
        self.assertEqual('system-out', test_cases[0].getchildren()[0].tag)
        self.assertIn('BlazeMeter report link: http://report/123', test_cases[0].getchildren()[0].text)
Code Example #40
 def monitoring_data(self, data):
     ROOT_LOGGER.debug("Data: %s", data)
Code Example #41
    def test_xml_format_sample_labels(self):
        # generate xml, compare hash

        obj = JUnitXMLReporter()
        obj.engine = EngineEmul()
        rep = BlazeMeterUploader()
        rep.results_url = "http://report/123"
        obj.engine.reporters.append(rep)

        path_from_config = tempfile.mktemp(suffix='.xml',
                                           prefix='junit-xml-sample-labels',
                                           dir=obj.engine.artifacts_dir)

        # data-source: finalstats by default
        obj.parameters = BetterDict.from_dict({"filename": path_from_config})

        obj.prepare()

        datapoint = DataPoint(0, [])
        cumul_data = datapoint[DataPoint.CUMULATIVE]

        cumul_data[""] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME:
            7.890211417203362e-06,
            KPISet.RESP_TIMES:
            Counter({
                0.0: 32160,
                0.001: 24919,
                0.002: 1049,
                0.003: 630,
                0.004: 224,
                0.005: 125,
                0.006: 73,
                0.007: 46,
                0.008: 32,
                0.009: 20,
                0.011: 8,
                0.01: 8,
                0.017: 3,
                0.016: 3,
                0.014: 3,
                0.013: 3,
                0.04: 2,
                0.012: 2,
                0.079: 1,
                0.081: 1,
                0.019: 1,
                0.015: 1
            }),
            KPISet.ERRORS: [
                {
                    'msg': 'Forbidden',
                    'cnt': 7300,
                    'type': 0,
                    'urls': Counter({'http://192.168.1.1/anotherquery': 7300}),
                    KPISet.RESP_CODES: '403'
                },
                {
                    'msg': 'Assertion failed: text /smth/ not found',
                    'cnt': 73,
                    'type': 1,
                    'urls': Counter({'http://192.168.1.1/anotherquery': 73}),
                    KPISet.RESP_CODES: '200'
                },
            ],
            KPISet.STDEV_RESP_TIME:
            0.04947974228872108,
            KPISet.AVG_LATENCY:
            0.0002825639815220692,
            KPISet.RESP_CODES:
            Counter({
                '304': 29656,
                '403': 29656,
                '200': 2
            }),
            KPISet.PERCENTILES: {
                '95.0': 0.001,
                '0.0': 0.0,
                '99.9': 0.008,
                '90.0': 0.001,
                '100.0': 0.081,
                '99.0': 0.003,
                '50.0': 0.0
            },
            KPISet.SUCCESSES:
            29658,
            KPISet.SAMPLE_COUNT:
            59314,
            KPISet.CONCURRENCY:
            0,
            KPISet.AVG_RESP_TIME:
            0.0005440536804127192,
            KPISet.FAILURES:
            29656
        })

        cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME:
            9.609548856969457e-06,
            KPISet.RESP_TIMES:
            Counter({
                0.0: 17219,
                0.001: 11246,
                0.002: 543,
                0.003: 341,
                0.004: 121,
                0.005: 66,
                0.006: 36,
                0.007: 33,
                0.008: 18,
                0.009: 12,
                0.011: 6,
                0.01: 5,
                0.013: 2,
                0.017: 2,
                0.012: 2,
                0.079: 1,
                0.016: 1,
                0.014: 1,
                0.019: 1,
                0.04: 1,
                0.081: 1
            }),
            KPISet.ERRORS: [],
            KPISet.STDEV_RESP_TIME:
            0.04073402130687656,
            KPISet.AVG_LATENCY:
            1.7196034796682178e-06,
            KPISet.RESP_CODES:
            Counter({
                '304': 29656,
                '200': 2
            }),
            KPISet.PERCENTILES: {
                '95.0': 0.001,
                '0.0': 0.0,
                '99.9': 0.009,
                '90.0': 0.001,
                '100.0': 0.081,
                '99.0': 0.004,
                '50.0': 0.0
            },
            KPISet.SUCCESSES:
            29658,
            KPISet.SAMPLE_COUNT:
            29658,
            KPISet.CONCURRENCY:
            0,
            KPISet.AVG_RESP_TIME:
            0.0005164542450603551,
            KPISet.FAILURES:
            0
        })

        cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME:
            6.1707580253574335e-06,
            KPISet.RESP_TIMES:
            Counter({
                0.0: 14941,
                0.001: 13673,
                0.002: 506,
                0.003: 289,
                0.004: 103,
                0.005: 59,
                0.006: 37,
                0.008: 14,
                0.007: 13,
                0.009: 8,
                0.01: 3,
                0.011: 2,
                0.016: 2,
                0.014: 2,
                0.017: 1,
                0.013: 1,
                0.015: 1,
                0.04: 1
            }),
            KPISet.ERRORS: [
                {
                    'msg': 'Forbidden',
                    'cnt': 7300,
                    'type': 0,
                    'urls': Counter({'http://192.168.1.1/anotherquery': 7300}),
                    KPISet.RESP_CODES: '403'
                },
                {
                    'msg': 'Assertion failed: text /smth/ not found',
                    'cnt': 73,
                    'type': 1,
                    'urls': Counter({'http://192.168.1.1/anotherquery': 73}),
                    KPISet.RESP_CODES: '200'
                },
            ],
            KPISet.STDEV_RESP_TIME:
            0.032465137860758844,
            KPISet.AVG_LATENCY:
            0.0005634272997032645,
            KPISet.RESP_CODES:
            Counter({'403': 29656}),
            KPISet.PERCENTILES: {
                '95.0': 0.001,
                '0.0': 0.0,
                '99.9': 0.008,
                '90.0': 0.001,
                '100.0': 0.04,
                '99.0': 0.003,
                '50.0': 0.0
            },
            KPISet.SUCCESSES:
            0,
            KPISet.SAMPLE_COUNT:
            29656,
            KPISet.CONCURRENCY:
            0,
            KPISet.AVG_RESP_TIME:
            0.0005716549770704078,
            KPISet.FAILURES:
            29656
        })

        cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME:
            9.609548856969457e-06,
            KPISet.RESP_TIMES:
            Counter({
                0.0: 17219,
                0.001: 11246,
                0.002: 543,
                0.003: 341,
                0.004: 121,
                0.005: 66,
                0.006: 36,
                0.007: 33,
                0.008: 18,
                0.009: 12,
                0.011: 6,
                0.01: 5,
                0.013: 2,
                0.017: 2,
                0.012: 2,
                0.079: 1,
                0.016: 1,
                0.014: 1,
                0.019: 1,
                0.04: 1,
                0.081: 1
            }),
            KPISet.ERRORS: [],
            KPISet.STDEV_RESP_TIME:
            0.04073402130687656,
            KPISet.AVG_LATENCY:
            1.7196034796682178e-06,
            KPISet.RESP_CODES:
            Counter({
                '304': 29656,
                '200': 2
            }),
            KPISet.PERCENTILES: {
                '95.0': 0.001,
                '0.0': 0.0,
                '99.9': 0.009,
                '90.0': 0.001,
                '100.0': 0.081,
                '99.0': 0.004,
                '50.0': 0.0
            },
            KPISet.SUCCESSES:
            29658,
            KPISet.SAMPLE_COUNT:
            29658,
            KPISet.CONCURRENCY:
            0,
            KPISet.AVG_RESP_TIME:
            0.0005164542450603551,
            KPISet.FAILURES:
            0
        })

        obj.aggregated_second(datapoint)

        obj.post_process()

        with open(obj.report_file_path, 'rb') as fds:
            f_contents = fds.read()

        ROOT_LOGGER.info("File: %s", f_contents)
        xml_tree = etree.fromstring(f_contents)
        self.assertEqual('testsuites', xml_tree.tag)
        suite = xml_tree.getchildren()[0]
        self.assertEqual('testsuite', suite.tag)
        self.assertListEqual(['sample_labels', "bzt"], suite.values())
        test_cases = suite.getchildren()
        self.assertEqual(3, len(test_cases))
        self.assertEqual('testcase', test_cases[0].tag)
        self.assertEqual('error', test_cases[0].getchildren()[1].tag)
        self.assertEqual('failure', test_cases[0].getchildren()[2].tag)
        self.assertEqual('system-out', test_cases[0].getchildren()[0].tag)
        self.assertIn('BlazeMeter report link: http://report/123',
                      test_cases[0].getchildren()[0].text)
Code Example #42
    def test_xml_format_passfail(self):
        obj = JUnitXMLReporter()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict()
        obj.engine.provisioning = CloudProvisioning()
        obj.engine.provisioning.results_url = "http://test/report/123"

        pass_fail1 = CriteriaProcessor([], None)

        crit_cfg1 = BetterDict()
        crit_cfg2 = BetterDict()
        crit_cfg3 = BetterDict()
        crit_cfg4 = BetterDict()

        crit_cfg1.merge({
            'stop': True,
            'label': 'Sample 1 Triggered',
            'fail': True,
            'timeframe': -1,
            'threshold': '150ms',
            'condition': '<',
            'subject': 'avg-rt'
        })

        crit_cfg2.merge({
            'stop': True,
            'label': 'Sample 1 Not Triggered',
            'fail': True,
            'timeframe': -1,
            'threshold': '300ms',
            'condition': '>',
            'subject': 'avg-rt'
        })

        crit_cfg3.merge({
            'stop': True,
            'label': 'Sample 2 Triggered',
            'fail': True,
            'timeframe': -1,
            'threshold': '150ms',
            'condition': '<=',
            'subject': 'avg-rt'
        })

        crit_cfg4.merge({
            'stop': True,
            'label': 'Sample 2 Not Triggered',
            'fail': True,
            'timeframe': -1,
            'threshold': '300ms',
            'condition': '=',
            'subject': 'avg-rt'
        })

        fc1_triggered = DataCriterion(crit_cfg1, pass_fail1)
        fc1_not_triggered = DataCriterion(crit_cfg2, pass_fail1)

        pass_fail2 = CriteriaProcessor([], None)

        fc2_triggered = DataCriterion(crit_cfg3, pass_fail1)
        fc2_not_triggered = DataCriterion(crit_cfg4, pass_fail1)

        pass_fail1.criteria.append(fc1_triggered)
        pass_fail1.criteria.append(fc1_not_triggered)
        pass_fail2.criteria.append(fc2_triggered)
        pass_fail2.criteria.append(fc2_not_triggered)

        fc1_triggered.is_triggered = True
        fc2_triggered.is_triggered = True

        pass_fail = PassFailStatus()
        pass_fail.processors.append(pass_fail1)
        pass_fail.processors.append(pass_fail2)
        obj.engine.reporters.append(pass_fail)
        obj.engine.reporters.append(BlazeMeterUploader())

        path_from_config = tempfile.mktemp(suffix='.xml',
                                           prefix='junit-xml_passfail',
                                           dir=obj.engine.artifacts_dir)

        obj.parameters.merge({
            "filename": path_from_config,
            "data-source": "pass-fail"
        })
        obj.prepare()
        obj.last_second = DataPoint(0)
        obj.post_process()

        with open(obj.report_file_path, 'rb') as fds:
            f_contents = fds.read()

        ROOT_LOGGER.info("File: %s", f_contents)
        xml_tree = etree.fromstring(f_contents)
        self.assertEqual('testsuites', xml_tree.tag)
        suite = xml_tree.getchildren()[0]
        self.assertEqual('testsuite', suite.tag)
        test_cases = suite.getchildren()
        self.assertEqual(4, len(test_cases))
        self.assertEqual('testcase', test_cases[0].tag)
        self.assertEqual('error', test_cases[0].getchildren()[1].tag)
        self.assertEqual('error', test_cases[2].getchildren()[1].tag)

        sys_out = test_cases[0].getchildren()[0]
        self.assertEqual('system-out', sys_out.tag)
        self.assertIn('BlazeMeter report link: http://test/report/123',
                      sys_out.text)