Example #1
    def test_simple_compilation(self):
        super(TestJavaC, self).setUp()
        engine_obj = EngineEmul()

        self.obj = JUnitTester()
        self.obj.settings = engine_obj.config.get("modules").get("junit")
        self.obj.engine = engine_obj

        self.obj.execution.merge({
            "scenario": {"script": RESOURCES_DIR + "selenium/invalid/selenium1.java"}
        })

        self.obj.prepare()
        self.obj.post_process()
Example #2
    def test_log_messages_duration(self):
        """Test the duration report line."""
        obj = FinalStatus()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict()
        self.sniff_log(obj.log)
        obj.prepare()
        obj.startup()
        obj.shutdown()
        obj.start_time -= 120005  # 120005 seconds == 1 day, 9:20:05
        obj.post_process()
        self.assertEqual("Test duration: 1 day, 9:20:05\n", self.log_recorder.info_buff.getvalue())
Example #3
    def test_dump(self):
        obj = FinalStatus()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict.from_dict({
            "dump-xml": obj.engine.create_artifact("status", ".xml"),
            "dump-csv": obj.engine.create_artifact("status", ".csv")
        })
        self.sniff_log(obj.log)

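        # feed a single synthetic datapoint so the XML/CSV dumps have something to write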
        obj.aggregated_second(random_datapoint(time.time()))
        obj.startup()
        obj.shutdown()

        obj.post_process()
        self.assertIn("XML", self.log_recorder.info_buff.getvalue())
Example #4
    def test_401(self):
        obj = BlazeMeterUploader()
        obj.engine = EngineEmul()
        mock = BZMock(obj._user)
        mock.mock_get.update({
            'https://a.blazemeter.com/api/v4/web/version':
                HTTPError(None, None, None, None, None),  # positional args: url, code, msg, hdrs, fp
        })
        self.assertRaises(HTTPError, obj.prepare)
Example #5
    def test_load_reader_real2(self):
        reader1 = ApiritifLoadReader(self.obj.log)
        reader1.engine = EngineEmul()
        reader1.register_file(RESOURCES_DIR +
                              "jmeter/jtl/apiritif-results/apiritif-0.csv")
        reader1.register_file(RESOURCES_DIR +
                              "jmeter/jtl/apiritif-results/apiritif-1.csv")

        reader2 = ApiritifLoadReader(self.obj.log)
        reader2.engine = EngineEmul()
        reader2.register_file(RESOURCES_DIR +
                              "jmeter/jtl/apiritif-results/apiritif--10.csv")
        reader2.register_file(RESOURCES_DIR +
                              "jmeter/jtl/apiritif-results/apiritif--11.csv")

        reader = ConsolidatingAggregator()
        reader.engine = EngineEmul()
        reader.add_underling(reader1)
        reader.add_underling(reader2)

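        # four result files across two underlings give the consolidated concurrency of 4 asserted below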
        items = list(reader.datapoints())
        self.assertEqual(39, len(items))
        self.assertEqual(4,
                         items[-1][DataPoint.CURRENT][''][KPISet.CONCURRENCY])
Example #6
    def test_1(self):
        obj = ConsoleStatusReporter()
        self.sniff_log(obj.log)
        obj.engine = EngineEmul()
        obj.engine.provisioning = Local()
        obj.engine.provisioning.start_time = time.time()
        obj.engine.config[Provisioning.PROV] = ''
        jmeter = self.get_jmeter()
        jmeter.engine = obj.engine
        jmeter.execution[ScenarioExecutor.HOLD_FOR] = 10
        jmeter.execution.merge({'hold-for': 0, 'ramp-up': 0})
        jmeter.delay = 10
        jmeter.prepare()
        widget = jmeter.get_widget()
        widget.update()
        jmeter.startup()
        widget.update()
        obj.engine.provisioning.executors = [jmeter]
        obj.settings["disable"] = False
        obj.settings['dummy_cols'] = 160
        obj.settings['dummy_rows'] = 40
        obj.settings['disable'] = False
        obj.prepare()
        obj.startup()

        obj.check()
        obj.temp_stream.write("test1\n")
        obj.temp_stream.flush()
        obj.temp_stream.write("test1\n")
        obj.temp_stream.flush()
        obj.check()

        for n in range(0, 10):
            point = self.__get_datapoint(n)
            obj.aggregated_second(point)
            obj.temp_stream.write("test %s\n" % n)
            obj.temp_stream.flush()
            obj.check()
            self.assertTrue(obj.screen.started)

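        # a datapoint with an empty response-code bucket should not produce a 'Failed' warning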
        point = self.__get_datapoint(11)
        point[DataPoint.CURRENT][''][KPISet.RESP_CODES][''] = 1
        obj.aggregated_second(point)

        obj.check()
        obj.shutdown()
        obj.post_process()
        self.assertNotIn('Failed', self.log_recorder.warn_buff.getvalue())
Example #7
    def test_psutil_potential_bugs(self):
        conf = {'metrics': ['cpu', 'mem', 'disks', 'conn-all']}
        client = LocalClient(ROOT_LOGGER, 'label', conf, EngineEmul())
        client.connect()

        import psutil

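        # temporarily stub the psutil counters to emulate platforms where they return None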
        try:
            net_io_counters = psutil.net_io_counters
            disk_io_counters = psutil.disk_io_counters
            psutil.net_io_counters = lambda: None
            psutil.disk_io_counters = lambda: None

            client.monitor.resource_stats()  # should throw no exception
        finally:
            psutil.net_io_counters = net_io_counters
            psutil.disk_io_counters = disk_io_counters
Example #8
    def setUp(self):
        super(TestCLI, self).setUp()
        self.logger = self.log
        os.makedirs(BUILD_DIR, exist_ok=True)
        self.log = os.path.join(BUILD_DIR, "bzt.log")
        self.verbose = False
        self.quiet = True
        self.no_system_configs = True
        self.option = []

        self.obj = CLI(self)
        self.assertTrue(os.path.exists(self.log))

        self.clean_log(self.logger)

        self.aliases = []
        self.obj.engine = EngineEmul()
Example #9
    def test_direct_feeding(self):
        obj = BlazeMeterUploader()
        self.sniff_log(obj.log)
        obj.engine = EngineEmul()
        mock = BZMock(obj._user)
        mock.mock_post.update({
            'https://data.blazemeter.com/submit.php?session_id=direct&signature=sign&test_id=None&user_id=None&pq=0&target=labels_bulk&update=1': {},
            'https://data.blazemeter.com/api/v4/image/direct/files?signature=sign': {
                "result": True
            },
            'https://a.blazemeter.com/api/v4/sessions/direct/stop': {
                "result": True
            },
            'https://data.blazemeter.com/submit.php?session_id=direct&signature=sign&test_id=None&user_id=None&pq=0&target=engine_health&update=1': {
                'result': {
                    'session': {}
                }
            }
        })
        mock.mock_get.update({
            'https://a.blazemeter.com/api/v4/sessions/direct': {
                "result": {}
            }
        })
        mock.mock_patch.update({
            'https://a.blazemeter.com/api/v4/sessions/direct': {
                "result": {}
            }
        })
        obj.parameters['session-id'] = 'direct'
        obj.parameters['signature'] = 'sign'
        obj.settings['token'] = 'FakeToken'
        obj.prepare()
        obj.startup()
        obj.check()
        obj.shutdown()
        obj.engine.stopping_reason = TaurusException("To cover")
        obj.post_process()
        self.assertNotIn("Failed to finish online",
                         self.log_recorder.warn_buff.getvalue())
        self.assertEqual('direct', obj._session['id'])
        self.assertEqual(9, len(mock.requests),
                         "Requests were: %s" % mock.requests)
Example #10
    def setUp(self):
        super(TestJUnitTester, self).setUp()
        engine_obj = EngineEmul()
        paths = [local_paths_config()]
        engine_obj.configure(paths)

        # just download geckodriver & chromedriver with selenium
        selenium = SeleniumExecutor()
        selenium.engine = engine_obj
        selenium.install_required_tools()
        for driver in selenium.webdrivers:
            selenium.env.add_path({"PATH": driver.get_driver_dir()})

        self.obj = JUnitTester()
        self.obj.env = selenium.env
        self.obj.settings = engine_obj.config.get("modules").get("junit")
        self.obj.engine = engine_obj
Example #11
    def test_func_report_all_no_stacktrace(self):
        obj = FinalStatus()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict.from_dict({"report-tests": "all", "print-stacktrace": False})
        self.sniff_log(obj.log)
        obj.prepare()
        obj.startup()
        obj.shutdown()
        obj.aggregated_results(*self.__get_func_tree())
        obj.post_process()
        info_log = self.log_recorder.info_buff.getvalue()
        self.assertIn("Total: 3 tests", info_log)
        self.assertIn("Test TestClass.case1 - PASSED", info_log)
        self.assertIn("Test TestClass.case2 - FAILED", info_log)
        self.assertIn("Test TestClass.case3 - BROKEN", info_log)
        self.assertNotIn("stacktrace2", info_log)
        self.assertNotIn("stacktrace3", info_log)
Example #12
    def test_func_report(self):
        obj = FinalStatus()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict()
        self.sniff_log(obj.log)
        obj.prepare()
        obj.startup()
        obj.shutdown()
        obj.aggregated_results(*self.__get_func_tree())
        obj.post_process()
        info_log = self.log_recorder.info_buff.getvalue()
        warn_log = self.log_recorder.warn_buff.getvalue()
        self.assertIn("Total: 3 tests", info_log)
        self.assertIn("Test TestClass.case2 failed: something broke", warn_log)
        self.assertIn("stacktrace2", warn_log)
        self.assertIn("Test TestClass.case3 failed: something is badly broken", warn_log)
        self.assertIn("stacktrace3", warn_log)
Example #13
    def test_passes_artifacts_dir_with_envs(self):
        cmdline = "echo %TAURUS_ARTIFACTS_DIR%" if is_windows() else "echo $TAURUS_ARTIFACTS_DIR"
        engine = EngineEmul({
            "settings": {
                "env": {"BZT_ARTIFACTS_DIR_ENV_TEST": "custom_dir_from_env"},
                "artifacts-dir": get_uniq_name(directory=get_full_path(TEST_DIR),
                                               prefix="${BZT_ARTIFACTS_DIR_ENV_TEST}/%Y-%m-%d_%H-%M-%S.%f")
            }})
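        # eval_env() should resolve ${BZT_ARTIFACTS_DIR_ENV_TEST} inside the artifacts-dir template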
        engine.eval_env()
        engine.prepare()
        executor = self.obj
        executor.engine = engine
        process = executor._execute(cmdline, shell=True)
        stdout, _ = communicate(process)
        self.assertEqual(engine.artifacts_dir, stdout.strip())

        if "BZT_ARTIFACTS_DIR_ENV_TEST" in os.environ:
            os.environ.pop("BZT_ARTIFACTS_DIR_ENV_TEST")
Example #14
    def test_worker_aggregation(self):
        self.configure({
            "execution": {
                "scenario": {
                    "script": RESOURCES_DIR + "locust/simple.py"
                }
            }
        })
        self.obj.prepare()
        self.obj.reader = WorkersReader(
            RESOURCES_DIR + "locust/locust-workers.ldjson", 2, ROOT_LOGGER)
        self.obj.engine.aggregator = ConsolidatingAggregator()
        self.obj.engine.aggregator.engine = EngineEmul()
        self.obj.engine.aggregator.add_underling(self.obj.reader)
        self.assertEqual(
            107,
            len(list(self.obj.engine.aggregator.datapoints(final_pass=True))))
        self.obj.post_process()
Example #15
    def setUp(self):
        super(TaskTestCase, self).setUp()
        self.obj = ShellExecutor()
        self.obj.parameters = BetterDict()
        self.obj.engine = EngineEmul()
        self.obj.engine.config.merge({
            "provisioning": "local",
            "modules": {
                "local": {
                    "class": "bzt.modules.provisioning.Local"
                },
                "cloud": {
                    "class": "bzt.modules.blazemeter.CloudProvisioning"
                },
            }
        })
        self.obj.engine.default_cwd = os.getcwd()
        self.sniff_log(self.obj.log)
Example #16
    def test_log_messages_failed_labels(self):
        obj = FinalStatus()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict.from_dict({
            "failed-labels": True,
            "percentiles": False,
            "summary": False,
            "test-duration": False
        })
        self.sniff_log(obj.log)

        obj.startup()
        obj.shutdown()
        obj.aggregated_second(self.__get_datapoint())
        obj.post_process()
        self.assertIn(
            "29656 failed samples: http://192.168.1.1/anotherquery\n",
            self.log_recorder.info_buff.getvalue())
Example #17
    def test_xml_report_test_duration(self):
        obj = FinalStatus()
        obj.engine = EngineEmul()
        xml_report = obj.engine.create_artifact("status", ".xml")
        obj.parameters = BetterDict.from_dict({
            "dump-xml": xml_report,
        })

        obj.startup()
        obj.aggregated_second(self.__get_datapoint(ts=90))
        obj.aggregated_second(self.__get_datapoint(ts=100))
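        # two datapoints 10 seconds apart (ts=90 and ts=100) yield the 10.0s duration asserted below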
        obj.shutdown()
        obj.post_process()

        self.assertTrue(os.path.exists(xml_report))
        with open(xml_report) as fds:
            report_content = fds.read()
        self.assertIn('<TestDuration>10.0</TestDuration>', report_content)
Example #18
    def test_log_messages_samples_count(self):
        obj = FinalStatus()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict.from_dict({
            "failed-labels": False,
            "percentiles": False,
            "summary": True,
            "test-duration": False,
            "summary-labels": False
        })
        self.sniff_log(obj.log)
        obj.aggregated_second(self.__get_datapoint())
        obj.startup()
        obj.shutdown()
        obj.post_process()

        self.assertEqual("Samples count: 59314, 50.00% failures\n",
                         self.log_recorder.info_buff.getvalue())
Example #19
    def test_new_project_existing_test(self):
        obj = BlazeMeterUploader()
        mock = BZMock(obj._user)
        mock.mock_get.update({
            'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test':
            {
                'result': [{
                    'id': 1,
                    'name': 'Taurus Test',
                    'configuration': {
                        "type": 'external'
                    }
                }]
            },
            'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test':
            {
                'result': []
            }
        })

        mock.mock_post.update({
            'https://a.blazemeter.com/api/v4/projects': {
                "result": {
                    "id": 1
                }
            },
            'https://a.blazemeter.com/api/v4/tests': {
                "result": {
                    "id": 1
                }
            },
        })

        obj.parameters['project'] = 'Proj name'
        obj.settings['token'] = '123'
        obj.settings['browser-open'] = 'none'
        obj.engine = EngineEmul()
        obj.prepare()
        self.assertEqual('https://a.blazemeter.com/api/v4/projects',
                         mock.requests[4]['url'])
        self.assertEqual('POST', mock.requests[4]['method'])
        self.assertEqual('https://a.blazemeter.com/api/v4/tests',
                         mock.requests[6]['url'])
        self.assertEqual('POST', mock.requests[6]['method'])
Example #20
    def test_monitoring_buffer_limit_option(self):
        obj = BlazeMeterUploader()
        obj.engine = EngineEmul()
        mock = BZMock(obj._user)
        obj.settings["monitoring-buffer-limit"] = 100
        obj.prepare()
        for i in range(1000):
            mon = [{
                "ts": i,
                "source": "local",
                "cpu": float(i) / 1000 * 100,
                "mem": 2,
                "bytes-recv": 100,
                "other": 0
            }]
            obj.monitoring_data(mon)
            for source, buffer in iteritems(obj.monitoring_buffer.data):
                self.assertLessEqual(len(buffer), 100)
        self.assertEqual(1, len(mock.requests))
Example #21
    def test_exception(self):
        local = Local()
        local.engine = EngineEmul()
        local.engine.config.merge({EXEC: [{}]})
        local.engine.config.get("settings")["default-executor"] = "mock"
        local.engine.unify_config()
        local.prepare()
        local.startup()

        local.check()

        local.shutdown()
        try:
            local.post_process()
        except ToolError as exc:
            self.assertNotIn('DIAGNOSTICS', str(exc))
            self.assertIsNotNone(exc.diagnostics)
            self.assertEqual(exc.diagnostics, ['DIAGNOSTICS'])
        except BaseException as exc:
            self.fail("Was supposed to fail with ToolError, but crashed with %s" % exc)
        else:
            self.fail("post_process() was expected to raise ToolError")
Example #22
    def test_anonymous_feeding(self):
        obj = BlazeMeterUploader()
        obj.engine = EngineEmul()
        obj.browser_open = False
        mock = BZMock(obj._user)
        mock.mock_post.update({
            'https://a.blazemeter.com/api/v4/sessions': {
                "result": {
                    "signature": "sign",
                    "publicTokenUrl": "publicUrl",
                    "session": {
                        "id": 1,
                        "testId": 1,
                        "userId": 1
                    },
                    "master": {
                        "id": 1
                    },
                }
            },
            'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {},
            'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': {
                "result": True
            },
            'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1': {
                'result': {
                    'session': {}
                }
            },
        })
        obj.prepare()
        obj.startup()
        obj.check()
        obj.shutdown()
        obj.post_process()
        self.assertEqual(1, obj._session['id'])
        self.assertEqual(6, len(mock.requests),
                         "Requests were: %s" % mock.requests)
Example #23
    def test_log_messages_summary_labels(self):
        obj = FinalStatus()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict.from_dict({"summary-labels": True, "percentiles": False, "summary": False,
                                               "test-duration": False})
        self.sniff_log(obj.log)

        obj.startup()
        obj.shutdown()
        obj.aggregated_second(self.__get_datapoint())

        expected = ("Request label stats:\n"
                    "+----------------------------------+--------+---------+--------+-----------+\n"
                    "| label                            | status |    succ | avg_rt | error     |\n"
                    "+----------------------------------+--------+---------+--------+-----------+\n"
                    "| http://192.168.1.1/anotherquery  |  FAIL  |   0.00% |  0.001 | Forbidden |\n"
                    "| http://192.168.1.1/somequery     |   OK   | 100.00% |  0.001 |           |\n"
                    "| http://192.168.100.100/somequery |   OK   | 100.00% |  0.001 |           |\n"
                    "+----------------------------------+--------+---------+--------+-----------+\n")
        obj.post_process()
        self.assertIn(expected, self.log_recorder.info_buff.getvalue())
Example #24
    def test_load_reader(self):
        reader = ApiritifLoadReader(self.obj.log)
        reader.engine = EngineEmul()

        # add empty reader
        with tempfile.NamedTemporaryFile() as f_name:
            reader.register_file(f_name.name)
            items = list(reader.datapoints(True))

        self.assertEqual(len(items), 0)
        self.assertFalse(reader.read_records)
        reader.register_file(RESOURCES_DIR + "jmeter/jtl/tranctl.jtl")
        items = list(reader.datapoints(True))
        self.assertEqual(len(items), 1)
        items = list(reader.datapoints(True))
        self.assertEqual(len(items), 0)
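        # registering the same file twice should still yield a single consolidated datapoint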
        reader.register_file(RESOURCES_DIR + "jmeter/jtl/tranctl.jtl")
        reader.register_file(RESOURCES_DIR + "jmeter/jtl/tranctl.jtl")
        items = list(reader.datapoints(True))
        self.assertTrue(reader.read_records)
        self.assertEqual(len(items), 1)
Example #25
    def test_new_project_new_test(self):
        obj = BlazeMeterUploader()
        mock = BZMock(obj._user)
        mock.mock_get.update({
            'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {'result': []},
            'https://a.blazemeter.com/api/v4/projects?workspaceId=1': {'result': []}
        })

        mock.mock_post.update({
            'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1}},
            'https://a.blazemeter.com/api/v4/tests': {"result": {"id": 1}},
        })

        obj.settings['token'] = '123'
        obj.settings['browser-open'] = 'none'
        obj.engine = EngineEmul()
        obj.prepare()
        self.assertEqual('https://a.blazemeter.com/api/v4/projects', mock.requests[6]['url'])
        self.assertEqual('POST', mock.requests[6]['method'])
        self.assertEqual('https://a.blazemeter.com/api/v4/tests', mock.requests[7]['url'])
        self.assertEqual('POST', mock.requests[7]['method'])
Example #26
    def test_full_generation(self):
        # check mqtt protocol handling: getting request, parsing of it, generation of jmx
        engine = EngineEmul()
        jmeter = MockJMeterExecutor()
        jmeter.engine = engine
        jmeter.configure({'scenario': 'sc1'})
        scenario = BetterDict.from_dict({
            'protocol': 'mqtt',
            'requests': [
                {'cmd': 'connect', 'addr': 'server.com'},
                {'cmd': 'disconnect'}
            ]})
        jmeter.engine.config.merge({'scenarios': {'sc1': scenario}})
        jmeter.settings.merge({'protocol-handlers': {'mqtt': 'bzt.jmx.mqtt.MQTTProtocolHandler'}})
        builder = JMeterScenarioBuilder(jmeter)
        elements = builder.compile_scenario(jmeter.get_scenario())
        self.assertEqual(4, len(elements))

        # the appropriate sampler classes have been generated
        self.assertEqual('net.xmeter.samplers.ConnectSampler', elements[0].attrib['testclass'])
        self.assertEqual('net.xmeter.samplers.DisConnectSampler', elements[2].attrib['testclass'])
Example #27
    def test_public_report(self):
        mock = BZMock()
        mock.mock_get.update({
            'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {"result": []}
        })

        mock.mock_post.update({
            'https://a.blazemeter.com/api/v4/projects': {"result": {'id': 1}},
            'https://a.blazemeter.com/api/v4/tests': {'result': {'id': 'unittest1'}},
            'https://a.blazemeter.com/api/v4/tests/unittest1/start-external': {"result": {
                'session': {'id': 'sess1', 'userId': 1, 'testId': 1},
                'master': {'id': 'master1', 'userId': 1},
                'signature': ''
            }},
            'https://a.blazemeter.com/api/v4/masters/master1/public-token': {'result': {'publicToken': 'publicToken'}},
            'https://data.blazemeter.com/submit.php?session_id=sess1&signature=&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {
                "result": {'session': {}}},
            'https://data.blazemeter.com/api/v4/image/sess1/files?signature=': {'result': True},
        })

        obj = BlazeMeterUploader()
        obj.settings['token'] = '123'
        obj.settings['browser-open'] = 'none'
        obj.settings['public-report'] = True
        obj.settings['send-monitoring'] = False
        obj.engine = EngineEmul()
        mock.apply(obj._user)
        self.sniff_log(obj.log)
        obj.prepare()
        obj.startup()
        obj.aggregated_second(random_datapoint(10))
        obj.check()
        obj.shutdown()
        obj.post_process()

        log_buff = self.log_recorder.info_buff.getvalue()
        log_line = "Public report link: https://a.blazemeter.com/app/?public-token=publicToken#/masters/master1/summary"
        self.assertIn(log_line, log_buff)
        ROOT_LOGGER.warning("\n".join([x['url'] for x in mock.requests]))
        self.assertEqual(14, len(mock.requests))
Example #28
    def test_no_notes_for_public_reporting(self):
        mock = BZMock()
        mock.mock_post.update({
            'https://a.blazemeter.com/api/v4/sessions/1/terminate-external': {},
            'https://data.blazemeter.com/submit.php?session_id=1&signature=None&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {},
        })

        obj = BlazeMeterUploader()
        obj.parameters['project'] = 'Proj name'
        obj.settings['token'] = ''  # public reporting
        obj.settings['browser-open'] = 'none'
        obj.engine = EngineEmul()
        mock.apply(obj._user)
        obj.prepare()

        obj._session = Session(obj._user, {'id': 1, 'testId': 1, 'userId': 1})
        obj._master = Master(obj._user, {'id': 1})

        obj.engine.stopping_reason = ValueError('wrong value')
        obj.aggregated_second(random_datapoint(10))
        obj.kpi_buffer[-1][DataPoint.CUMULATIVE][''][KPISet.ERRORS] = [
            {'msg': 'Forbidden', 'cnt': 10, 'type': KPISet.ERRTYPE_ASSERT, 'urls': [], KPISet.RESP_CODES: '111',
             'tag': ""},
            {'msg': 'Allowed', 'cnt': 20, 'type': KPISet.ERRTYPE_ERROR, 'urls': [], KPISet.RESP_CODES: '222'}]
        obj.send_monitoring = False
        obj.post_process()

        # TODO: looks like this whole block of checks is useless
        # check for note appending in _postproc_phase3()
        reqs = [{'url': '', 'data': ''} for _ in range(4)]  # add template for minimal size
        reqs = (reqs + mock.requests)[-4:]
        self.assertNotIn('api/v4/sessions/1', reqs[0]['url'])
        self.assertNotIn('api/v4/sessions/1', reqs[1]['url'])
        self.assertNotIn('api/v4/masters/1', reqs[2]['url'])
        self.assertNotIn('api/v4/masters/1', reqs[3]['url'])
        if reqs[1]['data']:
            self.assertNotIn('ValueError: wrong value', reqs[1]['data'])
        if reqs[3]['data']:
            self.assertNotIn('ValueError: wrong value', reqs[3]['data'])
Example #29
    def test_multiple_local_monitorings(self):
        config1 = {'metrics': ['mem', 'engine-loop']}
        config2 = {'metrics': ['cpu', 'mem']}

        obj = Monitoring()
        obj.engine = EngineEmul()
        obj.parameters.merge({'local': [config1, config2]})

        obj.prepare()
        self.assertEqual(1, len(obj.clients))
        self.assertEqual({'mem', 'cpu', 'engine-loop'},
                         set(obj.clients[0].metrics))
        self.assertTrue(isinstance(obj.clients[0].monitor, LocalMonitor))

        obj.prepare()
        self.assertEqual(1, len(obj.clients))
        self.assertEqual({'mem', 'cpu', 'engine-loop'},
                         set(obj.clients[0].metrics))
        self.assertTrue(isinstance(obj.clients[0].monitor, LocalMonitor))

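        # a non-zero interval returns cached data; interval = 0 forces a fresh poll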
        data1 = obj.clients[0].get_data()
        obj.clients[0].interval = 1  # take cached data
        data2 = obj.clients[0].get_data()
        obj.clients[0].interval = 0  # discard cached data
        data3 = obj.clients[0].get_data()
        for item1, item2, item3 in zip(data1, data2, data3):
            self.assertEqual(item1, item2)
            self.assertNotEqual(item2, item3)

        metrics = []
        for element in data1:
            self.assertIn('source', element)
            self.assertIn('ts', element)
            for key in element:
                if key not in ('source', 'ts'):
                    metrics.append(key)

        for config in (config1, config2):
            self.assertTrue(all(m in metrics for m in config['metrics']))
Example #30
    def test_blazemeter_cloud_report_link(self):
        obj = FinalStatus()
        obj.engine = EngineEmul()
        xml_report = obj.engine.create_artifact("status", ".xml")
        obj.parameters = BetterDict.from_dict({
            "dump-xml": xml_report,
        })

        prov = CloudProvisioning()
        prov.results_url = "http://report/link"
        obj.engine.provisioning = prov

        obj.startup()
        obj.shutdown()

        obj.aggregated_second(self.__get_datapoint())
        obj.post_process()

        self.assertTrue(os.path.exists(xml_report))
        with open(xml_report) as fds:
            report_content = fds.read()
        self.assertIn('<ReportURL>http://report/link</ReportURL>', report_content)