    def test_cp_time(self):
        """
        Checkpoint (CP) time benchmark
        """
        res = ScenarioRunner(self, self.volatile_config,
                             self.volatile_scenario).run()

        self.assert_result(res, 'CheckpointProbe')

    def test_put_all_wal(self):
        """
        putAll() WAL size benchmark
        """
        res = ScenarioRunner(self, self.put_all_config,
                             self.put_all_scenario).run()

        self.assert_result(res, 'WalSizeProbe')

    def test_put_all_db(self):
        """
        putAll() DB folder size benchmark
        """
        res = ScenarioRunner(self, self.put_all_config,
                             self.put_all_scenario).run()

        self.assert_result(res, 'DbSizeProbe')

    def test_streamer_db(self):
        """
        Streamer DB size benchmark
        """
        res = ScenarioRunner(self, self.streamer_config,
                             self.streamer_scenario).run()

        self.assert_result(res, 'DbSizeProbe')

    def test_put_all_time(self):
        """
        putAll() time benchmark
        """
        res = ScenarioRunner(self, self.put_all_config,
                             self.put_all_scenario).run()

        self.assert_result(res, 'ExecutionTimeProbe')

    def test_streamer_time(self):
        """
        Streamer time benchmark
        """
        res = ScenarioRunner(self, self.streamer_config,
                             self.streamer_scenario).run()

        self.assert_result(res, 'ExecutionTimeProbe')

    def test_streamer_wal(self):
        """
        Streamer WAL size benchmark
        """
        res = ScenarioRunner(self, self.streamer_config,
                             self.streamer_scenario).run()

        self.assert_result(res, 'WalSizeProbe')

    def test_start_node_time(self):
        """
        Node start time benchmark
        """
        res = ScenarioRunner(self, self.start_node_config,
                             self.start_node_scenario).run()

        self.assert_result(res, 'StartTimeProbe')

    def test_snapshot_db(self):
        """
        Snapshot DB folder size benchmark

        Load data, take a few snapshots
        """
        res = ScenarioRunner(self, self.snapshot_config,
                             self.snapshot_scenario).run()

        self.assert_result(res, 'SnapshotSizeProbe')

    def test_snapshot_wal(self):
        """
        Snapshot WAL size benchmark

        Load data, take a few snapshots
        """
        res = ScenarioRunner(self, self.snapshot_config,
                             self.snapshot_scenario).run()

        self.assert_result(res, 'WalSizeProbe')

    def test_inc_snapshot_wal(self):
        """
        Incremental snapshot size benchmark

        Load data, take a few incremental snapshots
        """
        res = ScenarioRunner(self, self.inc_snapshot_config,
                             self.inc_snapshot_scenario).run()

        self.assert_result(res, 'SnapshotSizeProbe')

    def test_full_snapshot_time(self):
        """
        Full snapshot execution time benchmark

        Load data, take a few snapshots
        """
        res = ScenarioRunner(self, self.snapshot_config,
                             self.snapshot_scenario).run()

        self.assert_result(res, 'ExecutionTimeProbe')
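
All of the benchmarks above funnel their result through self.assert_result(res, <probe name>). Its implementation is not shown on this page; as a minimal sketch only, assuming the result-dict shape used by the TestProbe example further down (res['probes'][probe]['result_passed'] and ['result_message']), such a check could look like this:

    def assert_result(self, res, probe_name):
        # Sketch, not the actual implementation: fail the test unless the
        # named probe reported a passing result, using the probe's own message.
        probe_res = res['probes'][probe_name]

        assert probe_res['result_passed'], probe_res['result_message']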
Example #13
    def run_test(self, rebalance_config, scenario):
        """
        Run a rebalance benchmark scenario

        :param rebalance_config: rebalance config section from YAML
        :param scenario: scenario object
        :return:
        """
        res = ScenarioRunner(self, rebalance_config, scenario).run()

        self.assert_result(res, 'RebalanceSpeedProbe')

    def run_test(self, dr_config, scenario):
        """
        Run a DR benchmark scenario

        :param dr_config: DR config section from YAML
        :param scenario: scenario object
        :return:
        """
        res = ScenarioRunner(self, dr_config, scenario).run()

        self.assert_result(res, 'ExecutionTimeProbe')
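
The two run_test helpers above come from different suites (rebalance and DR); each wraps the same ScenarioRunner call and differs only in which probe it asserts. No call site is shown on this page; a hypothetical one, where the config key and the RebalanceScenario class are placeholders invented purely for illustration, might look like:

    def test_rebalance(self):
        # 'rebalance_historical' and RebalanceScenario are illustrative
        # placeholders, not names taken from this page
        self.run_test(self.rebalance_config['rebalance_historical'],
                      RebalanceScenario(self))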
Example #15
    def __init__(self, *args):
        super().__init__(*args)

        self.pme_config, self.artifacts = ScenarioRunner.initialize(
            self, args[0], 'pme.test_pme')

        self.scenarios = {
            'PME 1 server non-coordinator': (
                self.pme_config['pme_1_server_non_crd'],
                PMEServerScenario(self),
            ),
            'PME 1 server coordinator': (
                self.pme_config['pme_1_server_crd'],
                PMEServerScenario(self),
            ),
            'PME 5 server': (
                self.pme_config['pme_5_server'],
                PMEServerScenario(self),
            ),
            'PME 1 client': (
                self.pme_config['pme_1_client'],
                PMEClientScenario(self),
            ),
            'PME 5 client': (
                self.pme_config['pme_5_client'],
                PMEClientScenario(self),
            ),
            'PME activation': (
                self.pme_config['pme_activation'],
                PMEActivationScenario(self),
            ),
            'PME dynamic caches from client': (
                self.pme_config['pme_dynamic_caches_client'],
                PMEStartCachesScenario(self),
            ),
            'PME dynamic caches from server': (
                self.pme_config['pme_dynamic_caches_server'],
                PMEStartCachesScenario(self),
            ),
            'PME 64 client': (
                self.pme_config['pme_64_client'],
                PMEClientScenario(self),
            ),
        }
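    # Each entry above is a (config section, scenario object) pair; the PME
    # benchmark tests further down this page unpack it straight into the
    # runner, for example:
    #
    #     res = ScenarioRunner(self, *self.scenarios['PME 5 server']).run()
    #     ScenarioRunner.validate_probe(res, 'Exchange Server Join')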

    def test_test_failed_probe_config_check(self):
        config = {
            'warmup_run': 2,  # number of warmup runs
            'times_to_run': 5,  # measured runs (warmup_run + times_to_run = total runs in one try)
            'max_tries': 1,  # maximum number of full tries (fail the test after this many tries)
            'times_to_run_increment': 3,  # added to times_to_run on each rerun
            'data_size': 5000,  # data load size
            'should_pass': False,
            'test_scenario_param': True,  # scenario-specific test parameter
            'probes': {
                'test': {
                }
            }
        }

        try:
            ScenarioRunner(self, config, TestScenario(self)).run()
        except AssertionError as e:
            assert "There is no \"test_probe_param\" value in config" in str(e)
        else:
            raise AssertionError('Expected the probe config check to fail '
                                 'on the missing "test_probe_param" value')

    def test_test_failed(self):
        config = {
            'warmup_run': 2,  # number of warmup runs
            'times_to_run': 5,  # measured runs (warmup_run + times_to_run = total runs in one try)
            'max_tries': 1,  # maximum number of full tries (fail the test after this many tries)
            'times_to_run_increment': 3,  # added to times_to_run on each rerun
            'data_size': 5000,  # data load size
            'should_pass': False,
            'test_scenario_param': True,  # scenario-specific test parameter
            'probes': {
                'test': {
                    'test_probe_param': True,
                }
            }
        }

        res = ScenarioRunner(self, config, TestScenario(self)).run()

        assert not res['probes']['TestProbe']['result_passed'], \
            res['probes']['TestProbe']['result_message']
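
The assertion message checked in test_test_failed_probe_config_check suggests the runner validates each probe's config section for required parameters before running. Purely as an illustration of the behaviour those two tests rely on (not the runner's actual code), such a check could be sketched as:

def check_probe_config(probes_config, required_params):
    # Illustrative sketch only: make sure every probe section carries its
    # required parameters, mirroring the error message asserted above.
    for probe_name, probe_cfg in probes_config.items():
        for param in required_params.get(probe_name, []):
            assert param in probe_cfg, \
                'There is no "{}" value in config'.format(param)

# e.g. check_probe_config(config['probes'], {'test': ['test_probe_param']})
# raises for the first config above and passes for the second.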
Example #18
    def test_pme_bench_dynamic_caches_client(self):
        res = ScenarioRunner(
            self, *self.scenarios['PME dynamic caches from client']).run()

        ScenarioRunner.validate_probe(res, 'Exchange Dynamic Cache Start')
Example #19
    def test_pme_bench_deactivate(self):
        res = ScenarioRunner(self, *self.scenarios['PME activation']).run()

        ScenarioRunner.validate_probe(res, 'Exchange Deactivate')
Example #20
    def test_pme_bench_64_client_join(self):
        res = ScenarioRunner(self, *self.scenarios['PME 64 client']).run()

        ScenarioRunner.validate_probe(res, 'Exchange Client Join')
Example #21
    def test_pme_bench_5_client_leave(self):
        res = ScenarioRunner(self, *self.scenarios['PME 5 client']).run()

        ScenarioRunner.validate_probe(res, 'Exchange Client Leave')
Example #22
    def test_pme_bench_5_server_join(self):
        res = ScenarioRunner(self, *self.scenarios['PME 5 server']).run()

        ScenarioRunner.validate_probe(res, 'Exchange Server Join')
Example #23
    def test_pme_bench_1_server_coordinator_leave(self):
        res = ScenarioRunner(
            self, *self.scenarios['PME 1 server coordinator']).run()

        ScenarioRunner.validate_probe(res, 'Exchange Server Leave')
Example #24
    def test_pme_bench_1_server_join(self):
        res = ScenarioRunner(
            self, *self.scenarios['PME 1 server non-coordinator']).run()

        ScenarioRunner.validate_probe(res, 'Exchange Server Join')