Example #1
def get_fail_reader_alot(offset=0):
    mock = MockReader()
    for x in range(2, 200):
        rnd = int(random() * x)
        mock.data.append((x + offset, "first", 1, r(), r(), r(), 200,
                          random_string(1 + rnd), '', 0))
    return mock
Example #2
def get_success_reader_alot(prefix='', offset=0):
    mock = MockReader()
    for x in range(2, 200):
        rnd = int(random() * x)
        mock.data.append((x + offset, prefix + random_string(1 + rnd), 1, r(),
                          r(), r(), 200, '', '', 0))
    return mock
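
Both reader factories above come from the Taurus (bzt) aggregator test suite. The MockReader fixture and the small randomizing helpers (r, rc, err, random_string) are not shown on this page; the sketch below is a guess at what the snippets assume, with bzt installed. The sample-tuple field order appears to be (t_stamp, label, concurrency, resp_time, conn_time, latency, resp_code, error, trail, byte_count).

from random import choice, random
from string import ascii_letters

from bzt.modules.aggregator import ResultsReader


def r(mul=5):
    # assumed: a random timing value (response/connect/latency), scaled by mul
    return mul * random()


def rc():
    # assumed: a random HTTP response code
    return choice(('200', '204', '404', '500', '503'))


def err():
    # assumed: a random error message, or None for a passing sample
    return choice(('The Error', None))


def random_string(length):
    return ''.join(choice(ascii_letters) for _ in range(length))


class MockReader(ResultsReader):
    # buffers pre-built sample tuples and replays them through _read();
    # it listens to itself so aggregated datapoints accumulate in .results
    def __init__(self):
        super(MockReader, self).__init__()
        self.results = []
        self.data = []
        self.add_listener(self)

    def _read(self, final_pass=False):
        while self.data:
            yield self.data.pop(0)

    def aggregated_second(self, data):
        self.results.append(data)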
Example #3
    def test_speed(self):
        obj = self.obj

        mock = MockReader()
        mock.buffer_scale_idx = '100.0'
        obj.add_listener(mock)

        res = {}
        # current measurements show ~25K samples/sec
        for cnt in (10, 100, 1000, 10000, 25000, 40000, 50000):
            for a in range(0, cnt):
                sample = (cnt, "", 1, r(1000), r(1000), r(1000), rc(), err(), '', 0)
                mock.data.append(sample)
            before = time.time()
            for point in mock.datapoints():
                pass
            after = time.time()
            res[cnt] = after - before
            logging.info("Times: %s", res)

            while mock.results:
                point = mock.results.pop(0)
                overall = point[DataPoint.CURRENT]['']
                self.assertTrue(len(overall[KPISet.PERCENTILES]) > 0)

        for point in mock.datapoints(True):
            pass
Example #4
    def test_speed(self):
        obj = self.obj

        mock = MockReader()

        obj.add_listener(mock)

        res = {}
        # current measurements show ~25K samples/sec
        for cnt in (10, 100, 1000, 10000, 25000, 40000, 50000):
            for a in range(0, cnt):
                sample = (cnt, "", 1, r(1000), r(1000), r(1000), rc(), err(),
                          '')
                mock.data.append(sample)
            before = time.time()
            for point in mock.datapoints():
                pass
            after = time.time()
            res[cnt] = after - before
            logging.info("Times: %s", res)

            while mock.results:
                point = mock.results.pop(0)
                overall = point[DataPoint.CURRENT]['']
                self.assertTrue(len(overall[KPISet.PERCENTILES]) > 0)

        for point in mock.datapoints(True):
            pass
Example #5
def get_success_reader_shrinking_labels(max_label_size=20, count=500):
    mock = MockReader()
    half_size = max_label_size // 2
    for x in range(2, count):
        target_size = max_label_size - int(float(half_size) * float(x) / float(count))
        label = random_url(target_size)
        mock.data.append((x, label, 1, r(), r(), r(), 200, '', '', 0))
    return mock
Example #6
def get_success_reader_shrinking_labels(max_label_size=20, count=500):
    mock = MockReader()
    half_size = max_label_size // 2
    for x in range(2, count):
        target_size = max_label_size - int(
            float(half_size) * float(x) / float(count))
        label = random_url(target_size)
        mock.data.append((x, label, 1, r(), r(), r(), 200, '', '', 0))
    return mock
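
random_url is likewise undefined here; given how it is called (a label of roughly target_size characters), a plausible stand-in, reusing the random_string helper sketched earlier:

def random_url(target_size):
    # hypothetical: produce a URL-shaped label of about target_size characters
    prefix = 'http://site.com/'
    return prefix + random_string(max(1, target_size - len(prefix)))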
Example #7
def get_success_reader_selected_labels(offset=0):
    mock = MockReader()
    labels = ['http://blazedemo.com/reserve.php',
              'http://blazedemo.com/purchase.php',
              'http://blazedemo.com/vacation.html',
              'http://blazedemo.com/confirmation.php',
              'http://blazedemo.com/another.php']
    for x in range(2, 200):
        mock.data.append((x + offset, choice(labels), 1, r(), r(), r(), 200, '', '', 0))
    return mock
Example #8
def test_unimplemented_file_data_provider():
    class DummyFileDataProvider(FileDataProvider):
        pass

    assert DummyFileDataProvider.handles(None) is False

    with pytest.raises(NotImplementedError):
        DummyFileDataProvider.handles(r('data/example.json'))

    with pytest.raises(NotImplementedError):
        DummyFileDataProvider(r('data/example.json'))
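
Example #8 and the pytest snippets that follow come from the marvin test runner's suite, where r(...) is no longer a random number but a fixture-path resolver. A minimal sketch consistent with its usage (the real helper may differ):

import os

TESTS_DIR = os.path.dirname(os.path.abspath(__file__))


def r(relative_path):
    # assumed: resolve a fixture path such as 'data/example.json'
    # against the test directory
    return os.path.join(TESTS_DIR, relative_path)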
Example #9
def test_reserved_keywords(config):
    with pytest.raises(ValueError) as exc_info:
        config.set('class', 3)
    assert str(exc_info.value) == \
        "Can't use reserved name 'class' as config item"

    with pytest.raises(ValueError) as exc_info:
        config.load(r('config/raise.json'))
    assert str(exc_info.value) == \
        "Can't use reserved name 'raise' as config item"

    config.load_into('other', r('config/raise.json'))
    assert config.other["key"] == "value"
Example #10
def get_success_reader_selected_labels(offset=0):
    mock = MockReader()
    labels = [
        'http://blazedemo.com/reserve.php',
        'http://blazedemo.com/purchase.php',
        'http://blazedemo.com/vacation.html',
        'http://blazedemo.com/confirmation.php',
        'http://blazedemo.com/another.php'
    ]
    for x in range(2, 200):
        mock.data.append(
            (x + offset, choice(labels), 1, r(), r(), r(), 200, '', '', 0))
    return mock
Example #11
def test_exit_code_success():
    options = build_options(tests_path=r('runner/scenario1'),
                            config=None,
                            with_tags=["pass"],
                            without_tags=[])

    code = Runner(options).run()
    assert code == 0
Example #12
def test_exit_code_failure():
    options = build_options(tests_path=r('runner/scenario1'),
                            config=None,
                            with_tags=["fail"],
                            without_tags=[])

    code = Runner(options).run()
    assert code != 0
Example #13
def test_filtering_from_tags_in_code():
    options = build_options(tests_path=r('runner/scenario1'),
                            config=None,
                            with_tags=["tag1", "tag404"],
                            without_tags=[])
    iterations = collect_iteration_names(options)
    assert iterations == [
        "json-iteration1", "json-iteration2", "yaml-iteration1",
        "yaml-iteration2"
    ]

    options = build_options(tests_path=r('runner/scenario1'),
                            config=None,
                            with_tags=["tag2"],
                            without_tags=["tag1"])
    iterations = collect_iteration_names(options)
    assert iterations == [None]  # AnotherCase with NullDataProvider
Example #14
def test_load_empty_yaml():
    data = DataProviderRegistry.data_provider_for(r('data/empty.yaml'))
    assert data.name is None
    assert data.description is None
    assert data.tags is None
    assert data.setup_data is None
    iterations = [i for i in data.iterations]
    assert iterations == []
    assert data.tear_down_data is None
Example #15
def test_env_settings_loaded():
    options = build_options(config=r('runner/marvin_env_test.yaml'),
                            tests_path=None,
                            with_tags=None,
                            without_tags=None)
    cfg = Runner(options)._suite.cfg
    assert cfg.service_x['url'] == 'http://dev.example.com'
    assert cfg.service_x['timeout'] == 42
    assert cfg.other['important'] == 'setting'

    with env_var('MARVIN_ENV', 'production'):
        options = build_options(config=r('runner/marvin_env_test.yaml'),
                                tests_path=None,
                                with_tags=None,
                                without_tags=None)
        cfg = Runner(options)._suite.cfg
        assert cfg.service_x['url'] == 'http://prod.example.com'
        assert cfg.service_x['timeout'] == 42
        assert cfg.other['important'] == 'setting'
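
The env_var helper used above behaves as a context manager that temporarily sets an environment variable; a sketch matching that usage:

import os
from contextlib import contextmanager


@contextmanager
def env_var(name, value):
    # temporarily set an environment variable, restoring the old value on exit
    old = os.environ.get(name)
    os.environ[name] = value
    try:
        yield
    finally:
        if old is None:
            del os.environ[name]
        else:
            os.environ[name] = old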
Example #16
def test_hook_module_loaded():
    options = build_options(config=r('runner/marvin_hook_test.yaml'),
                            tests_path=None,
                            with_tags=None,
                            without_tags=None)

    assert os.environ.get('SOME_MARVIN_TEST') is None
    suite = Runner(options)._suite
    assert os.environ.get('SOME_MARVIN_TEST') == \
        "%s - %s" % (id(suite.publisher), id(suite.cfg))
Example #17
def test_iterations_no_data_key_and_unexpected_keys():
    data = DataProviderRegistry.data_provider_for(r('data/iterations_unexpected_keys.yaml'))
    assert data.name == 'Test unexpected keys'
    assert data.description == 'Should not fail'
    assert data.tags == {'bunch', 'of', 'tags'}
    assert data.setup_data == {'a_list': [1, 2]}
    iterations = [(i.name, i.description, i.tags, i.data) for i in data.iterations]
    assert iterations == [
        ('First iteration', None, None, None)
    ]
    assert data.tear_down_data == {'foo': 'baz'}
Example #18
    def __get_datapoint(self, n):
        point = DataPoint(n)
        overall = point[DataPoint.CURRENT].setdefault('', KPISet())
        overall[KPISet.CONCURRENCY] = r(100)
        overall[KPISet.SAMPLE_COUNT] = int(100 * r(1000))
        overall[KPISet.FAILURES] = overall[KPISet.SAMPLE_COUNT] / 2.0
        overall[KPISet.AVG_RESP_TIME] = r(100)
        overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
        overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
        overall[KPISet.PERCENTILES]['25'] = r(10)
        overall[KPISet.PERCENTILES]['50'] = r(20)
        overall[KPISet.PERCENTILES]['75'] = r(30)
        overall[KPISet.PERCENTILES]['90'] = r(40)
        overall[KPISet.PERCENTILES]['99'] = r(50)
        overall[KPISet.PERCENTILES]['100'] = r(100)
        overall[KPISet.RESP_CODES][rc()] = 1
        return point
Example #19
def test_json_data_provider():
    data = DataProviderRegistry.data_provider_for(r('data/example.json'))
    assert data.name == 'Test Name'
    assert data.description == 'very descriptive'
    assert data.tags == {'bunch', 'of', 'tags'}
    assert data.setup_data == {'a_list': [1, 2], 'foo': 'bar'}
    iterations = [(i.name, i.description, i.tags, i.data) for i in data.iterations]
    assert iterations == [
        (None, None, None, {'arg1': 'iteration 1', 'arg2': True}),
        (None, None, None, {'arg1': 'iteration 2', 'arg2': False})
    ]
    assert data.tear_down_data == {'foo': 'baz'}
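
The data/example.json fixture itself is not shown; the assertions pin its values but not its key names, so the following reconstruction is a guess:

{
    "name": "Test Name",
    "description": "very descriptive",
    "tags": ["bunch", "of", "tags"],
    "setup": {"a_list": [1, 2], "foo": "bar"},
    "iterations": [
        {"arg1": "iteration 1", "arg2": true},
        {"arg1": "iteration 2", "arg2": false}
    ],
    "tear_down": {"foo": "baz"}
}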
Example #20
    def __get_datapoint(self, n):
        point = DataPoint(n)
        overall = point[DataPoint.CURRENT].setdefault('', KPISet())
        overall[KPISet.CONCURRENCY] = r(100)
        overall[KPISet.SAMPLE_COUNT] = int(100 * r(1000))
        overall[KPISet.FAILURES] = overall[KPISet.SAMPLE_COUNT] / 2.0
        overall[KPISet.AVG_RESP_TIME] = r(100)
        overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
        overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
        overall[KPISet.PERCENTILES]['25'] = r(10)
        overall[KPISet.PERCENTILES]['50'] = r(20)
        overall[KPISet.PERCENTILES]['75'] = r(30)
        overall[KPISet.PERCENTILES]['90'] = r(40)
        overall[KPISet.PERCENTILES]['99'] = r(50)
        overall[KPISet.PERCENTILES]['100'] = r(100)
        overall[KPISet.RESP_CODES][rc()] = 1
        return point
Example #21
def test_loaded_configuration():
    expected_default = default_config()
    expected_from_file = default_config()
    expected_from_file.update({
        'tests_path': 'scenario1/',
        'filter': {
            'with_tags': ['include-this'],
            'without_tags': ['exclude-this']
        },
        'some_setting': 'some value'
    })
    expected_file_with_override = copy.deepcopy(expected_from_file)
    expected_file_with_override['tests_path'] = 'another/path'

    options = build_options(config=None,
                            tests_path=None,
                            with_tags=None,
                            without_tags=None)
    assert Runner(options)._suite.cfg.marvin == expected_default

    options = build_options(config=r('runner/marvin.yaml'),
                            tests_path=None,
                            with_tags=None,
                            without_tags=None)
    assert Runner(options)._suite.cfg.marvin == expected_from_file

    with env_var('MARVIN_CONFIG', r('runner/marvin.yaml')):
        options = build_options(config=None,
                                tests_path=None,
                                with_tags=None,
                                without_tags=None)
        assert Runner(options)._suite.cfg.marvin == expected_from_file

    options = build_options(config=r('runner/marvin.yaml'),
                            tests_path='another/path',
                            with_tags=None,
                            without_tags=None)
    assert Runner(options)._suite.cfg.marvin == expected_file_with_override
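
The runner/marvin.yaml file read here is not shown either; expected_from_file implies content roughly like the reconstruction below (whether these keys sit under a top-level marvin: section or the runner namespaces them itself is an assumption):

tests_path: scenario1/
filter:
  with_tags:
    - include-this
  without_tags:
    - exclude-this
some_setting: some value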
Example #22
def test_suite_generation():
    options = build_options(tests_path=r('runner/scenario1'),
                            with_tags=[],
                            without_tags=[],
                            config=None)
    collected = [(t.__name__, d.__class__, (d.setup_data or {}).get('name'))
                 for (t, d) in Runner(options)._suite.tests()]

    assert ("VerifySomething", YAMLDataProvider,
            "verify_something.data1.yaml") in collected
    assert ("VerifySomething", YAMLDataProvider,
            "verify_something.json") in collected
    assert ("AnotherCase", NullDataProvider, None) in collected
    assert len(collected) == 3
Example #23
def test_filtering_from_tags_in_data():
    # No filters runs all
    options = build_options(tests_path=r('runner/scenario2'),
                            config=None,
                            with_tags=[],
                            without_tags=[])
    iterations = collect_iteration_names(options)
    assert iterations == [
        'Iteration A', 'Iteration B', 'Iteration C', 'Iteration D'
    ]

    # matching whole data tag
    options = build_options(tests_path=r('runner/scenario2'),
                            config=None,
                            with_tags=['data1'],
                            without_tags=[])
    iterations = collect_iteration_names(options)
    assert iterations == ['Iteration A', 'Iteration B']

    # skipping whole data tag
    options = build_options(tests_path=r('runner/scenario2'),
                            config=None,
                            with_tags=[],
                            without_tags=['data1'])
    iterations = collect_iteration_names(options)
    assert iterations == ['Iteration C', 'Iteration D']

    # matching single iteration
    options = build_options(tests_path=r('runner/scenario2'),
                            config=None,
                            with_tags=['iterationA'],
                            without_tags=[''])
    iterations = collect_iteration_names(options)
    assert iterations == ['Iteration A']

    # skipping single iteration
    options = build_options(tests_path=r('runner/scenario2'),
                            config=None,
                            with_tags=[],
                            without_tags=['iterationB'])
    iterations = collect_iteration_names(options)
    assert iterations == ['Iteration A', 'Iteration C', 'Iteration D']

    # matching single iterations in multiple files
    options = build_options(tests_path=r('runner/scenario2'),
                            config=None,
                            with_tags=['regression'],
                            without_tags=[])
    iterations = collect_iteration_names(options)
    assert iterations == ['Iteration A', 'Iteration C']
Example #24
def get_fail_reader(offset=0):
    mock = MockReader()
    mock.data.append((1 + offset, "first", 1, r(), r(), r(), 200, 'FAILx3', '', 0))
    mock.data.append((2 + offset, "first", 1, r(), r(), r(), 200, 'FAILx1', '', 0))
    mock.data.append((5 + offset, "first", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((7 + offset, "second", 1, r(), r(), r(), 200, 'FAILx3', '', 0))
    mock.data.append((3 + offset, "first", 1, r(), r(), r(), 200, 'FAILx3', '', 0))
    mock.data.append((6 + offset, "second", 1, r(), r(), r(), 200, 'unique FAIL', '', 0))
    return mock
Example #25
    def test_negative_response_time_scaling_crash(self):
        self.obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        self.obj.prepare()

        self.sniff_log(self.obj.log)

        mock = MockReader()
        mock.data.append((1, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((2, "first", 1, -r(), r(), r(), 200, 'FAILx1', '', 0))
        mock.data.append((5, "first", 1, -r(), r(), r(), 200, None, '', 0))
        mock.data.append((7, "second", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((3, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((6, "second", 1, -r(), r(), r(), 200, 'unique FAIL', '', 0))

        self.obj.add_underling(mock)

        self.obj.check()
        for point in self.obj.datapoints():
            self.obj.log.info(to_json(point))

        self.assertIn("Negative response time reported", self.log_recorder.warn_buff.getvalue())
Example #26
def get_fail_reader(offset=0):
    mock = MockReader()
    mock.data.append(
        (1 + offset, "first", 1, r(), r(), r(), 200, 'FAILx3', '', 0))
    mock.data.append(
        (2 + offset, "first", 1, r(), r(), r(), 200, 'FAILx1', '', 0))
    mock.data.append(
        (5 + offset, "first", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append(
        (7 + offset, "second", 1, r(), r(), r(), 200, 'FAILx3', '', 0))
    mock.data.append(
        (3 + offset, "first", 1, r(), r(), r(), 200, 'FAILx3', '', 0))
    mock.data.append((6 + offset, "second", 1, r(), r(), r(), 200,
                      'unique FAIL', '', 0))
    return mock
Example #27
def test_config_load_json(config):
    config.load(r('config/some_json.json'))
    assert config.some_json['important_setting'] == 42
Example #28
    def test_json(self):
        obj = self.obj

        mock = MockReader()
        mock.buffer_scale_idx = '100.0'
        mock.data.append((1, "", 1, r(), r(), r(), 200, None, '', 0))
        mock.data.append((2, "", 1, r(), r(), r(), 200, None, '', 0))
        mock.data.append((2, "", 1, r(), r(), r(), 200, None, '', 0))
        mock.data.append((3, "", 1, r(), r(), r(), 200, None, '', 0))
        mock.data.append((3, "", 1, r(), r(), r(), 200, None, '', 0))
        mock.data.append((4, "", 1, r(), r(), r(), 200, None, '', 0))
        mock.data.append((4, "", 1, r(), r(), r(), 200, None, '', 0))

        obj.add_listener(mock)

        for point in mock.datapoints(True):
            pass

        for point in mock.results:
            serialized = json.loads(to_json(point))
            rt_keys = serialized["current"][""]["rt"].keys()
            for key in rt_keys:
                rt = float(key)
                self.assertGreaterEqual(rt, 1.0)
                self.assertLessEqual(rt, 2.0)
Example #29
    def test_negative_response_time_scaling_crash(self):
        obj = ConsolidatingAggregator()
        obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        obj.prepare()

        self.sniff_log(obj.log)

        mock = MockReader()
        mock.data.append((1, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((2, "first", 1, -r(), r(), r(), 200, 'FAILx1', '', 0))
        mock.data.append((5, "first", 1, -r(), r(), r(), 200, None, '', 0))
        mock.data.append(
            (7, "second", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((3, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append(
            (6, "second", 1, -r(), r(), r(), 200, 'unique FAIL', '', 0))

        obj.add_underling(mock)

        obj.check()
        for point in obj.datapoints():
            obj.log.info(to_json(point))

        self.assertIn("Negative response time reported",
                      self.log_recorder.warn_buff.getvalue())
Example #30
def test_unsupported_extension(config):
    with pytest.raises(ValueError) as exc_info:
        config.load(r('config/settings.ini'))

    assert str(exc_info.value) == "Unsupported config extension: '.ini'"
Example #31
def test_config_load_yaml(config):
    config.load(r('config/dummy_config.yml'))
    assert config.dummy_config['a_nested']['a_list'] == [
        'test1', 'test2', 'test3'
    ]
Example #32
def test_settings_merge_if_same_basename(config):
    config.load(r('config/dummy_config.yml'))
    config.load(r('config/config_override/dummy_config.yaml'))
    assert config.dummy_config['some_key'] == 'with a value'
    assert config.dummy_config['extra_key'] == 'bar'
    assert config.dummy_config['a_test'] is None
Example #33
def test_load_in_different_namespace(config):
    config.load_into('test', r('config/some_json.json'))
    assert config.test['important_setting'] == 42
Example #34
def test_config_load_multiple_files(config):
    config.load(r('config/dummy_config.yml'), r('config/some_json.json'))
    assert config.dummy_config['some_key'] == 'with a value'
    assert config.some_json['important_setting'] == 42
Example #35
def get_fail_reader_alot(offset=0):
    mock = MockReader()
    for x in range(2, 200):
        rnd = int(random() * x)
        mock.data.append((x + offset, "first", 1, r(), r(), r(), 200, random_string(1 + rnd), '', 0))
    return mock
Example #36
    def test_1(self):
        obj = self.obj

        mock = MockReader()
        mock.data.append((1, "", 1, r(), r(), r(), 200, None, ''))
        mock.data.append((2, "", 1, r(), r(), r(), 200, None, ''))
        mock.data.append((2, "", 1, r(), r(), r(), 200, None, ''))
        mock.data.append((3, "", 1, r(), r(), r(), 200, None, ''))
        mock.data.append((3, "", 1, r(), r(), r(), 200, None, ''))
        mock.data.append((4, "", 1, r(), r(), r(), 200, None, ''))
        mock.data.append((4, "", 1, r(), r(), r(), 200, None, ''))

        obj.add_listener(mock)

        for point in mock.datapoints():
            self.assertNotEqual(
                0, point[DataPoint.CUMULATIVE][''][KPISet.CONCURRENCY])

        mock.data.append((2, "", 1, r(), r(), r(), 200, None, ''))
        mock.data.append((2, "", 1, r(), r(), r(), 200, None, ''))

        for point in mock.datapoints():
            pass

        for point in mock.datapoints(True):
            pass

        for point in mock.results:
            overall = point[DataPoint.CURRENT]['']
            self.assertTrue(len(overall[KPISet.PERCENTILES]) > 0)
Example #37
def get_success_reader(offset=0):
    mock = MockReader()
    mock.data.append((1 + offset, "first", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((2 + offset, "second", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((2 + offset, "first", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((3 + offset, "second", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((3 + offset, "first", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((4 + offset, "third", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((4 + offset, "first", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((6 + offset, "second", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((6 + offset, "third", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((6 + offset, "first", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((5 + offset, "first", 1, r(), r(), r(), 200, None, '', 0))
    return mock
Example #38
    def test_1(self):
        obj = self.obj

        mock = MockReader()
        mock.data.append((1, "", 1, r(), r(), r(), 200, None))
        mock.data.append((2, "", 1, r(), r(), r(), 200, None))
        mock.data.append((2, "", 1, r(), r(), r(), 200, None))
        mock.data.append((3, "", 1, r(), r(), r(), 200, None))
        mock.data.append((3, "", 1, r(), r(), r(), 200, None))
        mock.data.append((4, "", 1, r(), r(), r(), 200, None))
        mock.data.append((4, "", 1, r(), r(), r(), 200, None))

        obj.add_listener(mock)

        for point in mock.datapoints():
            pass

        mock.data.append((2, "", 1, r(), r(), r(), 200, None))
        mock.data.append((2, "", 1, r(), r(), r(), 200, None))

        for point in mock.datapoints():
            pass

        for point in mock.datapoints(True):
            pass

        for point in mock.results:
            overall = point[DataPoint.CURRENT]['']
            self.assertTrue(len(overall[KPISet.PERCENTILES]) > 0)
Example #39
def get_success_reader(offset=0):
    mock = MockReader()
    mock.data.append((1 + offset, "", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((2 + offset, "", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((2 + offset, "", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((3 + offset, "", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((3 + offset, "", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((4 + offset, "", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((4 + offset, "", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((6 + offset, "", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((6 + offset, "", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((6 + offset, "", 1, r(), r(), r(), 200, None, '', 0))
    mock.data.append((5 + offset, "", 1, r(), r(), r(), 200, None, '', 0))
    return mock
Example #40
def get_success_reader_alot(prefix='', offset=0):
    mock = MockReader()
    for x in range(2, 200):
        rnd = int(random() * x)
        mock.data.append((x + offset, prefix + random_string(1 + rnd), 1, r(), r(), r(), 200, '', '', 0))
    return mock
Example #41
    def get_reader(self, offset=0):
        mock = MockReader()
        mock.data.append((1 + offset, "", 1, r(), r(), r(), 200, None))
        mock.data.append((2 + offset, "", 1, r(), r(), r(), 200, None))
        mock.data.append((2 + offset, "", 1, r(), r(), r(), 200, None))
        mock.data.append((3 + offset, "", 1, r(), r(), r(), 200, None))
        mock.data.append((3 + offset, "", 1, r(), r(), r(), 200, None))
        mock.data.append((4 + offset, "", 1, r(), r(), r(), 200, None))
        mock.data.append((4 + offset, "", 1, r(), r(), r(), 200, None))
        mock.data.append((6 + offset, "", 1, r(), r(), r(), 200, None))
        mock.data.append((6 + offset, "", 1, r(), r(), r(), 200, None))
        mock.data.append((6 + offset, "", 1, r(), r(), r(), 200, None))
        mock.data.append((5 + offset, "", 1, r(), r(), r(), 200, None))
        return mock
Example #42
    def test_1(self):
        obj = self.obj

        mock = MockReader()
        mock.buffer_scale_idx = '100.0'
        mock.data.append((1, "", 1, r(), r(), r(), 200, None, '', 0))
        mock.data.append((2, "", 1, r(), r(), r(), 200, None, '', 0))
        mock.data.append((2, "", 1, r(), r(), r(), 200, None, '', 0))
        mock.data.append((3, "", 1, r(), r(), r(), 200, None, '', 0))
        mock.data.append((3, "", 1, r(), r(), r(), 200, None, '', 0))
        mock.data.append((4, "", 1, r(), r(), r(), 200, None, '', 0))
        mock.data.append((4, "", 1, r(), r(), r(), 200, None, '', 0))

        obj.add_listener(mock)

        for point in mock.datapoints():
            self.assertNotEqual(0, point[DataPoint.CUMULATIVE][''][KPISet.CONCURRENCY])

        mock.data.append((2, "", 1, r(), r(), r(), 200, None, '', 0))
        mock.data.append((2, "", 1, r(), r(), r(), 200, None, '', 0))

        for point in mock.datapoints():
            pass

        for point in mock.datapoints(True):
            pass

        for point in mock.results:
            overall = point[DataPoint.CURRENT]['']
            self.assertTrue(len(overall[KPISet.PERCENTILES]) > 0)
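
Putting it together: the reader factories feed a ConsolidatingAggregator just as the negative-response-time tests show. A minimal end-to-end run, assuming bzt is installed and the helpers sketched earlier are in scope:

from bzt.modules.aggregator import ConsolidatingAggregator, DataPoint

obj = ConsolidatingAggregator()
obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
obj.prepare()

obj.add_underling(get_success_reader())

obj.check()
for point in obj.datapoints(final_pass=True):
    # each DataPoint carries per-label KPISets for the last interval
    print(point[DataPoint.TIMESTAMP], list(point[DataPoint.CURRENT].keys()))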