Example #1
    def test_stop_counting_criteria(self):
        self.configure(
            {"criteria": ["avg-rt>10ms for 2s, continue as failed"]})
        self.obj.prepare()
        self.obj.get_widget()
        start_time = time.time()

        for _n in range(0, 10):
            point = random_datapoint(start_time)
            point[DataPoint.CURRENT]['']["avg_rt"] = 1.0
            self.obj.aggregated_second(point)
            self.obj.check()
            start_time += 1

        self.assertEqual(self.obj.widget.text_widget.text,
                         "Failed: avg-rt>10ms for 10 sec\n")

        for _n in range(0, 10):
            point = random_datapoint(start_time)
            point[DataPoint.CURRENT]['']["avg_rt"] = 0.01
            self.obj.aggregated_second(point)
            self.obj.check()
            start_time += 1

        self.assertEqual(self.obj.widget.text_widget.text, "")
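Every criteria example here relies on a shared fixture that the snippets do not show: self.configure(...) feeds a config dict to the pass/fail module under test, and self.obj is that module instance. A minimal sketch of what such a base class might look like, assuming bzt's PassFailStatus and an EngineEmul test engine; the class name and import paths are illustrative guesses, not the project's actual test harness:

import unittest

from bzt.modules.passfail import PassFailStatus
from tests.mocks import EngineEmul  # assumed location of the test engine emulator


class PassFailTestBase(unittest.TestCase):  # hypothetical base class for the examples
    def setUp(self):
        self.obj = PassFailStatus()
        self.obj.engine = EngineEmul()

    def configure(self, config):
        # merge the example's {"criteria": [...]} dict into the module's parameters
        self.obj.parameters.merge(config)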
Example #2
    def test_bytes(self):
        self.configure({
            "criteria":
            [  # the generated bytes value always falls in the range 1 to 1000
                "bytes>0 for 1s, continue as successful",
                "bytes<1kb for 1s, continue as successful",
                "bytes<1mib for 1s, continue as successful",
                "bytes<1b for 1s, continue as failed",
                "bytes>1024 for 1s, continue as failed",
            ]
        })
        self.obj.prepare()

        self.assertEqual(self.obj.processors[0].criteria[1].threshold,
                         1024)  # conversion check
        self.assertEqual(self.obj.processors[0].criteria[2].threshold,
                         1024 * 1024)
        self.assertEqual(self.obj.processors[0].criteria[3].threshold, 1)

        for n in range(0, 10):
            point = random_datapoint(n)
            self.obj.aggregated_second(point)
            self.obj.check()

        self.obj.shutdown()
        self.assertTrue(self.obj.criteria[0].is_triggered)
        self.assertTrue(self.obj.criteria[1].is_triggered)
        self.assertTrue(self.obj.criteria[2].is_triggered)
        self.assertFalse(self.obj.criteria[3].is_triggered)
        self.assertFalse(self.obj.criteria[4].is_triggered)
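The threshold assertions above pin down the unit conversion the criteria parser is expected to apply: "1kb" maps to 1024 bytes, "1mib" to 1024 * 1024, and a bare "1b" stays at 1. A standalone sketch of that conversion, written only to illustrate the rule the test checks (it is not the parser used by the module):

import re

# binary multipliers consistent with the assertions above (kb == kib == 2**10, mb == mib == 2**20)
BYTE_UNITS = {"b": 1, "kb": 1024, "kib": 1024, "mb": 1024 ** 2, "mib": 1024 ** 2}


def parse_byte_threshold(text):
    """Turn strings like '1kb' or '1mib' into a byte count; bare numbers pass through unchanged."""
    match = re.fullmatch(r"(\d+)\s*([a-z]*)", text.strip().lower())
    if not match:
        raise ValueError("cannot parse byte threshold: %r" % text)
    number, unit = int(match.group(1)), match.group(2) or "b"
    return number * BYTE_UNITS[unit]


assert parse_byte_threshold("1kb") == 1024        # criteria[1] threshold
assert parse_byte_threshold("1mib") == 1024 ** 2  # criteria[2] threshold
assert parse_byte_threshold("1b") == 1            # criteria[3] threshold
assert parse_byte_threshold("1024") == 1024       # criteria[4] threshold, no unit suffix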
Example #3
    def test_within(self):
        self.configure({
            "criteria": [
                "fail>10% within 5s",
                "fail>1000 within 5s",
                "avg-rt>100ms within 10s",
            ]
        })
        self.obj.prepare()

        start_time = time.time()
        for _n in range(0, 20):
            point = random_datapoint(start_time)
            self.obj.aggregated_second(point)
            if _n % 2 == 0:
                try:
                    self.obj.check()
                except KeyboardInterrupt:
                    pass

            try:
                self.obj.check()
            except KeyboardInterrupt:
                pass
            start_time += 1
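These loops feed the module with random_datapoint(ts), a test helper that builds a synthetic DataPoint whose per-label KPI sets the examples then poke directly (for instance point[DataPoint.CURRENT][''][KPISet.RESP_CODES]). A rough stand-in showing the shape such a helper could produce, using bzt's DataPoint and KPISet classes; the particular fields and value ranges below are illustrative, not the real helper's output:

import random

from bzt.modules.aggregator import DataPoint, KPISet


def make_datapoint(ts):  # simplified stand-in for the real random_datapoint() helper
    kpiset = KPISet()
    kpiset[KPISet.SAMPLE_COUNT] = random.randint(1, 100)
    kpiset[KPISet.FAILURES] = random.randint(0, kpiset[KPISet.SAMPLE_COUNT])
    kpiset[KPISet.AVG_RESP_TIME] = random.random()    # seconds, so 0.01 means 10ms
    kpiset[KPISet.BYTES] = random.randint(1, 1000)    # matches the range noted in Example #2
    kpiset[KPISet.RESP_CODES] = {'200': kpiset[KPISet.SAMPLE_COUNT]}
    kpiset[KPISet.PERCENTILES] = {'50.0': random.random(), '90.0': random.random()}

    point = DataPoint(ts)
    point[DataPoint.CURRENT] = {'': kpiset}      # '' is the overall ("all labels") entry
    point[DataPoint.CUMULATIVE] = {'': kpiset}
    return point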
Example #4
    def test_excluded_cumulative(self):
        mock = BZMock()
        mock.mock_get.update({
            'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {"result": []},
        })
        mock.mock_post.update({
            'https://a.blazemeter.com/api/v4/projects': {"result": {'id': 1}},
            'https://a.blazemeter.com/api/v4/tests': {"result": {'id': 1}},
            'https://a.blazemeter.com/api/v4/tests/1/start-external': {"result": {
                "session": {'id': 1, "testId": 1, "userId": 1},
                "master": {'id': 1},
                "signature": "sign"
            }},
            'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': {"result": True},
            'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1' +
            '&pq=0&target=labels_bulk&update=1': {},
            'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1': {
                'result': {'session': {}}}
        })

        obj = BlazeMeterUploader()
        mock.apply(obj._user)
        obj.parameters['project'] = 'Proj name'
        obj.settings['token'] = '123'
        obj.settings['browser-open'] = 'none'
        obj.engine = EngineEmul()
        obj.prepare()
        obj.startup()
        obj.aggregated_second(random_datapoint(10))
        obj.kpi_buffer[-1][DataPoint.CUMULATIVE] = {}  # remove cumulative when ramp-up data is excluded
        obj.post_process()  # no 'Cumulative KPISet is non-consistent' exception here
Example #5
    def test_rc_within(self):
        self.configure({
            "criteria": [
                "rc413>10 within 3s, stop as successful",  # this one to cover branch that caused bug
                "rc413>10 within 10s, stop as failed",
            ]
        })
        self.obj.prepare()
        self.assertEqual(len(self.obj.criteria), 2)

        for n in range(0, 10):
            point = random_datapoint(n)
            rcs = point[DataPoint.CURRENT][''][KPISet.RESP_CODES]
            rcs['413'] = 3
            self.obj.aggregated_second(point)
            try:
                self.obj.check()
            except AutomatedShutdown:
                break

            self.assertLess(n, 3)

        self.obj.shutdown()
        self.obj.post_process()
        self.assertFalse(self.obj.criteria[0].is_triggered)
        self.assertTrue(self.obj.criteria[1].is_triggered)
Example #6
    def test_prepare2(self):
        self.configure({"criteria": ["avg-rt>10ms, continue as non-failed"]})
        self.obj.prepare()
        self.assertGreater(len(self.obj.criteria), 0)

        for n in range(0, 10):
            point = random_datapoint(n)
            self.obj.aggregated_second(point)
            self.obj.check()

        self.obj.post_process()
Example #7
    def test_prepare3(self):
        self.configure(
            {"criteria": ["avg-rt>10ms for 3s, continue as failed"]})
        self.obj.prepare()
        self.assertGreater(len(self.obj.criteria), 0)

        for n in range(0, 10):
            point = random_datapoint(n)
            point[DataPoint.CURRENT][''][KPISet.AVG_RESP_TIME] = 1
            self.obj.aggregated_second(point)
            self.obj.check()

        self.assertRaises(AutomatedShutdown, self.obj.post_process)
Example #8
    def test_dump(self):
        obj = FinalStatus()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict.from_dict({
            "dump-xml": obj.engine.create_artifact("status", ".xml"),
            "dump-csv": obj.engine.create_artifact("status", ".csv")
        })
        self.sniff_log(obj.log)

        obj.aggregated_second(random_datapoint(time.time()))
        obj.startup()
        obj.shutdown()

        obj.post_process()
        self.assertIn("XML", self.log_recorder.info_buff.getvalue())
Example #9
    def test_percentiles_track(self):
        self.configure({"criteria": ["p90>0ms"]})
        self.obj.prepare()
        self.assertGreater(len(self.obj.criteria), 0)

        for n in range(0, 10):
            point = random_datapoint(n)
            self.obj.aggregated_second(point)
            self.obj.check()

        self.obj.shutdown()
        try:
            self.obj.post_process()
            self.fail()
        except AutomatedShutdown:
            pass
Example #10
    def test_rc_over1(self):
        self.configure({"criteria": [
            "rc200<8 over 5s",
        ]})

        self.obj.prepare()

        for n in range(0, 10):
            point = random_datapoint(n)
            rcs = point[DataPoint.CURRENT][''][KPISet.RESP_CODES]
            rcs['200'] = 3
            self.obj.aggregated_second(point)
            self.obj.check()
            self.assertFalse(self.obj.criteria[0].is_triggered)

        self.obj.shutdown()
        self.obj.post_process()
Example #11
    def test_prepare(self):
        config = json.loads(open(RESOURCES_DIR + "json/passfail.json").read())
        self.configure(config['reporting'][0])
        self.obj.prepare()
        self.assertGreater(len(self.obj.criteria), 0)

        for n in range(0, 10):
            point = random_datapoint(n)
            ROOT_LOGGER.info("%s: %s", n, point)
            self.obj.aggregated_second(point)
            try:
                self.obj.check()
            except AutomatedShutdown:
                pass

        try:
            self.obj.post_process()
        except AutomatedShutdown:
            pass
Example #12
    def test_cumulative_criteria_post_process(self):
        self.configure({
            "criteria": [
                "p90>0ms, continue as failed",
                "avg-rt>0ms, continue as failed",
            ]
        })
        self.obj.prepare()
        self.assertEqual(len(self.obj.criteria), 2)

        for n in range(0, 10):
            point = random_datapoint(n)
            self.obj.aggregated_second(point)
            self.obj.check()

        self.obj.shutdown()
        self.assertRaises(AutomatedShutdown, self.obj.post_process)
        for crit in self.obj.criteria:
            self.assertTrue(crit.is_triggered)
Example #13
    def test_monitoring(self):
        self.configure({
            "criteria": [{
                "class": "bzt.modules.monitoring.MonitoringCriteria",
                "subject": "local/cpu",
                "condition": ">",
                "threshold": 90,
                "timeframe": "5s"
            }]
        })

        self.obj.prepare()

        for n in range(0, 10):
            point = random_datapoint(n)
            self.obj.aggregated_second(point)
            self.obj.check()

        self.obj.shutdown()
        self.assertFalse(self.obj.criteria[0].is_triggered)
Example #14
    def test_rc_over2(self):
        self.configure({"criteria": [
            "rc200>8 over 3s",
        ]})
        self.obj.prepare()

        for n in range(0, 10):
            point = random_datapoint(n)
            rcs = point[DataPoint.CURRENT][''][KPISet.RESP_CODES]
            rcs['200'] = 5
            self.obj.aggregated_second(point)
            try:
                self.obj.check()
            except AutomatedShutdown:
                break

            self.assertLess(n, 3)

        self.assertTrue(self.obj.criteria[0].is_triggered)
        self.obj.shutdown()
        self.obj.post_process()
Example #15
    def test_public_report(self):
        mock = BZMock()
        mock.mock_get.update({
            'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {"result": []}
        })

        mock.mock_post.update({
            'https://a.blazemeter.com/api/v4/projects': {"result": {'id': 1}},
            'https://a.blazemeter.com/api/v4/tests': {'result': {'id': 'unittest1'}},
            'https://a.blazemeter.com/api/v4/tests/unittest1/start-external': {"result": {
                'session': {'id': 'sess1', 'userId': 1, 'testId': 1},
                'master': {'id': 'master1', 'userId': 1},
                'signature': ''
            }},
            'https://a.blazemeter.com/api/v4/masters/master1/public-token': {'result': {'publicToken': 'publicToken'}},
            'https://data.blazemeter.com/submit.php?session_id=sess1&signature=&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {
                "result": {'session': {}}},
            'https://data.blazemeter.com/api/v4/image/sess1/files?signature=': {'result': True},
        })

        obj = BlazeMeterUploader()
        obj.settings['token'] = '123'
        obj.settings['browser-open'] = 'none'
        obj.settings['public-report'] = True
        obj.settings['send-monitoring'] = False
        obj.engine = EngineEmul()
        mock.apply(obj._user)
        self.sniff_log(obj.log)
        obj.prepare()
        obj.startup()
        obj.aggregated_second(random_datapoint(10))
        obj.check()
        obj.shutdown()
        obj.post_process()

        log_buff = self.log_recorder.info_buff.getvalue()
        log_line = "Public report link: https://a.blazemeter.com/app/?public-token=publicToken#/masters/master1/summary"
        self.assertIn(log_line, log_buff)
        ROOT_LOGGER.warning("\n".join([x['url'] for x in mock.requests]))
        self.assertEqual(14, len(mock.requests))
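The BlazeMeter examples (this one, #4, #16, #19 and #20) all wire the uploader to BZMock, which substitutes canned responses for real HTTP calls, keyed by URL and HTTP method, and records every request so the assertions can inspect mock.requests. A much-simplified sketch of that idea, inferred from how the mock is used in these tests; the class below and the _request hook it patches are assumptions, not bzt's actual implementation:

class CannedHTTPMock:  # hypothetical stand-in, loosely modelled on BZMock usage in these examples
    def __init__(self):
        self.mock_get = {}     # url -> canned response, or a list served one element per call
        self.mock_post = {}
        self.mock_patch = {}
        self.requests = []     # every call recorded as {'url': ..., 'data': ..., 'method': ...}

    def apply(self, user):
        # assumption: the uploader's user object funnels all HTTP traffic through one method
        user._request = self._request

    def _request(self, url, data=None, method="GET"):
        self.requests.append({'url': url, 'data': data, 'method': method})
        table = {"GET": self.mock_get, "POST": self.mock_post, "PATCH": self.mock_patch}[method]
        response = table[url]
        if isinstance(response, list):           # lists are consumed in order (see Example #19)
            response = response.pop(0)
        if isinstance(response, BaseException):  # exception entries are raised, e.g. the IOError in Example #19
            raise response
        return response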
Example #16
    def test_no_notes_for_public_reporting(self):
        mock = BZMock()
        mock.mock_post.update({
            'https://a.blazemeter.com/api/v4/sessions/1/terminate-external': {},
            'https://data.blazemeter.com/submit.php?session_id=1&signature=None&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {},
        })

        obj = BlazeMeterUploader()
        obj.parameters['project'] = 'Proj name'
        obj.settings['token'] = ''  # public reporting
        obj.settings['browser-open'] = 'none'
        obj.engine = EngineEmul()
        mock.apply(obj._user)
        obj.prepare()

        obj._session = Session(obj._user, {'id': 1, 'testId': 1, 'userId': 1})
        obj._master = Master(obj._user, {'id': 1})

        obj.engine.stopping_reason = ValueError('wrong value')
        obj.aggregated_second(random_datapoint(10))
        obj.kpi_buffer[-1][DataPoint.CUMULATIVE][''][KPISet.ERRORS] = [
            {'msg': 'Forbidden', 'cnt': 10, 'type': KPISet.ERRTYPE_ASSERT, 'urls': [], KPISet.RESP_CODES: '111',
             'tag': ""},
            {'msg': 'Allowed', 'cnt': 20, 'type': KPISet.ERRTYPE_ERROR, 'urls': [], KPISet.RESP_CODES: '222'}]
        obj.send_monitoring = False
        obj.post_process()

        # TODO: looks like this whole block of checks is useless
        # check for note appending in _postproc_phase3()
        reqs = [{'url': '', 'data': ''} for _ in range(4)]  # pad with empty templates so at least 4 entries exist
        reqs = (reqs + mock.requests)[-4:]
        self.assertNotIn('api/v4/sessions/1', reqs[0]['url'])
        self.assertNotIn('api/v4/sessions/1', reqs[1]['url'])
        self.assertNotIn('api/v4/masters/1', reqs[2]['url'])
        self.assertNotIn('api/v4/masters/1', reqs[3]['url'])
        if reqs[1]['data']:
            self.assertNotIn('ValueError: wrong value', reqs[1]['data'])
        if reqs[3]['data']:
            self.assertNotIn('ValueError: wrong value', reqs[3]['data'])
Example #17
    def test_prepare_label_issue(self):
        # https://groups.google.com/forum/?utm_medium=email&utm_source=footer#!msg/codename-taurus/PWjU7xVucZ0/WkjUAbE1EwAJ
        self.configure({"criteria": ["avg-rt of spaced label>10ms"]})
        self.obj.prepare()
        self.assertGreater(len(self.obj.criteria), 0)

        for n in range(0, 10):
            point = random_datapoint(n)
            point[DataPoint.CUMULATIVE]['spaced label'] = point[
                DataPoint.CUMULATIVE]['']
            point[DataPoint.CURRENT]['spaced label'] = point[
                DataPoint.CURRENT]['']
            self.obj.aggregated_second(point)
            self.obj.check()

        self.obj.shutdown()

        try:
            self.obj.post_process()
            self.fail()
        except AutomatedShutdown:
            pass
Example #18
    def test_executor_level(self):
        executor = ModuleMock()
        executor.engine = self.obj.engine
        self.obj.engine.provisioning.executors.append(executor)
        executor.execution.merge({
            "criteria": ["p90>0ms"],
            "scenario": {
                "criteria": ["p50>0ms"]
            }
        })
        self.obj.prepare()
        self.assertEqual(len(self.obj.criteria), 2)

        for n in range(0, 10):
            point = random_datapoint(n)
            self.obj.aggregated_second(point)
            self.obj.check()

        self.obj.shutdown()
        try:
            self.obj.post_process()
            self.fail()
        except AutomatedShutdown:
            pass
Example #19
    def test_check(self):
        mock = BZMock()
        mock.mock_get.update({
            'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {"result": []},
            'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {"result": []},
            'https://a.blazemeter.com/api/v4/projects?workspaceId=1&name=Proj+name': {"result": []},
        })
        mock.mock_post.update({
            'https://a.blazemeter.com/api/v4/projects': {"result": {
                "id": 1,
                "name": "boo",
                "userId": 2,
                "description": None,
                "created": time.time(),
                "updated": time.time(),
                "organizationId": None
            }},
            'https://a.blazemeter.com/api/v4/tests': {"result": {'id': 1}},
            'https://a.blazemeter.com/api/v4/tests/1/start-external': {"result": {
                'session': {'id': 1, 'userId': 1, 'testId': 1},
                'master': {'id': 1, 'userId': 1},
                'signature': 'sign'
            }},
            'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': [
                {},
                {"result": {'session': {"statusCode": 140, 'status': 'ENDED'}}},
                {},
            ],
            'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': [
                IOError("monitoring push expected fail"),
                {"result": True}, {"result": True}, {"result": True}, {"result": True},
                {"result": True}, {"result": True}, {"result": True}, {"result": True},
            ],
            'https://a.blazemeter.com/api/v4/sessions/1/stop': {},
            'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1': {
                "result": {'session': {}}},
        })

        obj = BlazeMeterUploader()
        obj.parameters['project'] = 'Proj name'
        obj.settings['token'] = '123'
        obj.settings['browser-open'] = 'none'
        obj.engine = EngineEmul()
        shutil.copy(
            __file__,
            os.path.join(obj.engine.artifacts_dir, os.path.basename(__file__)))
        mock.apply(obj._user)
        obj._user.timeout = 0.1
        obj.prepare()
        obj.startup()
        for x in range(0, 31):
            obj.aggregated_second(random_datapoint(x))
        mon = [{"ts": 1, "source": "local", "cpu": 1, "mem": 2, "bytes-recv": 100, "other": 0}]
        obj.monitoring_data(mon)
        obj.check()
        for x in range(32, 65):
            obj.aggregated_second(random_datapoint(x))
        obj.last_dispatch = time.time() - 2 * obj.send_interval
        self.assertRaises(KeyboardInterrupt, obj.check)
        obj.aggregated_second(random_datapoint(10))
        obj.shutdown()
        log_file = obj.engine.create_artifact('log', '.tmp')
        handler = logging.FileHandler(log_file)
        obj.engine.log.parent.addHandler(handler)
        obj.engine.config.get('modules').get('shellexec').get(
            'env')['TAURUS_INDEX_ALL'] = 1
        obj.post_process()
        self.assertEqual(20, len(mock.requests))
        obj.engine.log.parent.removeHandler(handler)
Example #20
    def test_some_errors(self):
        mock = BZMock()
        mock.mock_get.update({
            'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {"result": []},
            'https://a.blazemeter.com/api/v4/projects?workspaceId=1&name=Proj+name': {"result": []},
            'https://a.blazemeter.com/api/v4/sessions/1': {"result": {'id': 1, "note": "somenote"}},
            'https://a.blazemeter.com/api/v4/masters/1': {"result": {'id': 1, "note": "somenote"}},
        })
        mock.mock_post.update({
            'https://a.blazemeter.com/api/v4/projects': {"result": {'id': 1}},
            'https://a.blazemeter.com/api/v4/tests': {"result": {'id': 1}},
            'https://a.blazemeter.com/api/v4/tests/1/start-external': {"result": {
                "session": {'id': 1, "testId": 1, "userId": 1},
                "master": {'id': 1},
                "signature": "sign"
            }},
            'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': {"result": True},
            'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {},
            'https://a.blazemeter.com/api/v4/sessions/1/stop': {"result": True},
            'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1': {
                'result': {'session': {}}},
        })

        mock.mock_patch.update({
            'https://a.blazemeter.com/api/v4/sessions/1': {"result": {"id": 1, "note": "somenote"}},
            'https://a.blazemeter.com/api/v4/masters/1': {"result": {"id": 1, "note": "somenote"}},
        })

        obj = BlazeMeterUploader()
        mock.apply(obj._user)
        obj.parameters['project'] = 'Proj name'
        obj.settings['token'] = '123'
        obj.settings['browser-open'] = 'none'
        obj.engine = EngineEmul()
        obj.prepare()
        obj.startup()
        obj.engine.stopping_reason = ValueError('wrong value')
        obj.aggregated_second(random_datapoint(10))
        obj.kpi_buffer[-1][DataPoint.CUMULATIVE][''][KPISet.ERRORS] = [
            {'msg': 'Forbidden', 'cnt': 10, 'type': KPISet.ERRTYPE_ASSERT, 'urls': [],
             KPISet.RESP_CODES: '111', 'tag': None},
            {'msg': 'Allowed', 'cnt': 20, 'type': KPISet.ERRTYPE_ERROR, 'urls': [],
             KPISet.RESP_CODES: '222'},
            {'msg': 'Not Found', 'cnt': 10, 'type': KPISet.ERRTYPE_SUBSAMPLE, 'urls': {'/non': '404'},
             KPISet.RESP_CODES: '404', 'tag': None},
        ]
        obj.post_process()
        obj.log.info("Requests: %s", mock.requests)

        # check for note appending in _postproc_phase3()
        reqs = mock.requests[-4:]
        self.assertIn('api/v4/sessions/1', reqs[0]['url'])
        self.assertIn('api/v4/sessions/1', reqs[1]['url'])
        self.assertIn('api/v4/masters/1', reqs[2]['url'])
        self.assertIn('api/v4/masters/1', reqs[3]['url'])
        self.assertIn('ValueError: wrong value', str(reqs[1]['data']))
        self.assertIn('ValueError: wrong value', str(reqs[3]['data']))

        labels = mock.requests[8]['data']
        if not isinstance(labels, str):
            labels = labels.decode("utf-8")
        obj.log.info("Labels: %s", labels)
        data = json.loads(str(labels))
        self.assertEqual(1, len(data['labels']))
        total_item = data['labels'][0]
        self.assertEqual('ALL', total_item['name'])
        self.assertEqual(total_item['assertions'],
                         [{'failureMessage': 'Forbidden', 'failures': 10, 'name': 'All Assertions'}])
        self.assertEqual(total_item['errors'],
                         [{'m': 'Allowed', 'count': 20, 'rc': '222'}])
        self.assertEqual(total_item['failedEmbeddedResources'],
                         [{'url': '/non', 'count': 10, 'rc': '404', 'rm': 'Not Found'}])
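Pieced together from the assertions above, the JSON body posted to the labels_bulk endpoint is expected to carry one aggregate label with the injected errors mapped into three buckets; only the fields the test actually checks are shown, everything else is omitted:

expected_total_label = {
    "name": "ALL",  # the aggregate label
    "assertions": [{"failureMessage": "Forbidden", "failures": 10, "name": "All Assertions"}],
    "errors": [{"m": "Allowed", "count": 20, "rc": "222"}],
    "failedEmbeddedResources": [{"url": "/non", "count": 10, "rc": "404", "rm": "Not Found"}],
}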