Example #1
    def run(self):
        self.extract_tools()

        self.load()
        self.wait_for_persistence()
        self.backup()

        initial_backup_size = local.calc_backup_size(self.cluster_spec,
                                                     rounded=False)

        self.access()
        self.wait_for_persistence()

        # Define a secondary, incremental load. The item count is borrowed from
        # the 'creates' field, which the load phase doesn't otherwise use.
        inc_load = self.test_config.load_settings.creates
        workers = self.test_config.load_settings.workers
        size = self.test_config.load_settings.size

        # New key prefix needed to create incremental dataset.
        self.load(settings=LoadSettings({"items": inc_load,
                                         "workers": workers,
                                         "size": size}),
                  target_iterator=TargetIterator(self.cluster_spec,
                                                 self.test_config,
                                                 prefix='inc-'))
        self.wait_for_persistence()

        inc_backup_time = self.backup_with_stats(mode=True)
        total_backup_size = local.calc_backup_size(self.cluster_spec,
                                                   rounded=False)
        inc_backup_size = round(total_backup_size - initial_backup_size, 2)

        self._report_kpi(inc_backup_time, inc_backup_size)
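
Each variant in this listing relies on local.calc_backup_size to measure the on-disk archive. As a point of reference, here is a minimal, self-contained sketch of what a du-based size helper could look like; the directory argument, function name, and unit conversion are assumptions for illustration only, not the real implementation.

    import subprocess

    def calc_backup_size_sketch(backup_dir: str, rounded: bool = True) -> float:
        """Return the size of a local backup directory in GB (illustrative only)."""
        # 'du -sb' prints the total size in bytes followed by the path.
        output = subprocess.check_output(['du', '-sb', backup_dir])
        size_gb = int(output.split()[0]) / 2 ** 30
        # rounded=False keeps two decimals so that small deltas, such as
        # total_backup_size - initial_backup_size above, survive the subtraction.
        return round(size_gb, 0 if rounded else 2)
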
Example #2
    def run(self):
        super().run()

        self.backup()
        self.wait_for_persistence()

        initial_size = local.calc_backup_size(self.cluster_spec)
        compact_time = self.compact()
        compacted_size = local.calc_backup_size(self.cluster_spec)
        size_diff = initial_size - compacted_size

        self.report_kpi(compact_time, size_diff)
Example #3
    def run(self):
        super().run()

        self.backup()
        self.wait_for_persistence()

        initial_size = local.calc_backup_size(self.cluster_spec, rounded=False)
        compact_time = self.compact()
        compacted_size = local.calc_backup_size(self.cluster_spec,
                                                rounded=False)
        # Size differences can be quite small, so use more precision here
        size_diff = round(initial_size - compacted_size, 2)

        self.report_kpi(compact_time, size_diff)
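
The only functional difference from Example #2 is rounded=False. A quick sketch of why the extra precision matters; the sizes below are invented for illustration:

    # Pre-rounded (whole-GB) sizes can make a small compaction saving vanish.
    initial_size = 10.38       # GB, hypothetical unrounded figure
    compacted_size = 10.11     # GB, hypothetical unrounded figure

    print(round(initial_size - compacted_size, 2))      # 0.27
    print(round(initial_size) - round(compacted_size))  # 0 -- the saving disappears
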
Example #4
    def _report_kpi(self, time_elapsed):
        edition = 'CE' if self.rest.is_community(self.master_node) else 'EE'
        backup_size = local.calc_backup_size(self.cluster_spec)

        backing_store = self.test_config.backup_settings.storage_type
        sink_type = self.test_config.backup_settings.sink_type

        tool = 'backup'
        storage = None
        if backing_store:
            storage = backing_store
        elif sink_type:
            storage = sink_type

        self.reporter.post(
            *self.metrics.bnr_throughput(time_elapsed, edition, tool, storage)
        )

        if sink_type != 'blackhole':
            self.reporter.post(
                *self.metrics.backup_size(
                    backup_size,
                    edition,
                    tool if backing_store or sink_type else None,
                    storage)
            )
Example #5
    def _report_kpi(self, time_elapsed):
        edition = 'CE' if self.rest.is_community(self.master_node) else 'EE'
        backup_size = local.calc_backup_size(self.cluster_spec)

        backing_store = self.test_config.backup_settings.storage_type
        sink_type = self.test_config.backup_settings.sink_type

        tool = 'backup'
        if backing_store:
            tool += '-' + backing_store
        elif sink_type:
            tool += '-' + sink_type

        self.reporter.post(
            *self.metrics.bnr_throughput(time_elapsed,
                                         edition,
                                         tool=tool)
        )

        if sink_type != 'blackhole':
            self.reporter.post(
                *self.metrics.backup_size(
                    backup_size,
                    edition,
                    tool=tool if backing_store or sink_type else None)
            )
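
Example #4 reports the backing store or sink as a separate storage argument, whereas Example #5 folds it into the tool name itself. A small sketch of the resulting labels; the storage_type/sink_type values are assumptions for illustration:

    def tool_label(backing_store: str, sink_type: str) -> str:
        # Mirrors the naming logic in Example #5.
        tool = 'backup'
        if backing_store:
            tool += '-' + backing_store
        elif sink_type:
            tool += '-' + sink_type
        return tool

    assert tool_label('sqlite', '') == 'backup-sqlite'        # hypothetical storage_type
    assert tool_label('', 'blackhole') == 'backup-blackhole'  # sink_type; size KPI skipped
    assert tool_label('', '') == 'backup'                     # neither set
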
Example #6
    def _report_kpi(self, *args):
        edition = 'CE' if self.rest.is_community(self.master_node) else 'EE'
        backup_size = local.calc_backup_size(self.cluster_spec)

        self.reporter.post(
            *self.metrics.backup_size(backup_size, edition)
        )
Example #7
    def _report_kpi(self, time_elapsed):
        edition = 'CE' if self.rest.is_community(self.master_node) else 'EE'
        backup_size = local.calc_backup_size(self.cluster_spec)

        self.reporter.post(
            *self.metrics.bnr_throughput(time_elapsed, edition, tool='backup'))

        self.reporter.post(*self.metrics.backup_size(backup_size, edition))
Example #8
    def run(self):
        self.extract_tools()

        if self.test_config.backup_settings.use_tls or self.test_config.restore_settings.use_tls:
            self.download_certificate()

        self.get_tool_versions()

        self.load()
        self.wait_for_persistence()
        self.check_num_items()
        self.compact_bucket(wait=True)
        self.backup()

        initial_backup_size = local.calc_backup_size(self.cluster_spec, rounded=False)

        self.access()
        self.wait_for_persistence()

        # Define a secondary, incremental load. The item count comes from the
        # 'additional_items' field in the load settings.
        inc_load = self.test_config.load_settings.additional_items
        workers = self.test_config.load_settings.workers
        size = self.test_config.load_settings.size

        # New key prefix needed to create incremental dataset.
        self.load(
            settings=LoadSettings({"items": inc_load, "workers": workers, "size": size}),
            target_iterator=TargetIterator(self.cluster_spec, self.test_config, prefix='inc-')
        )
        self.wait_for_persistence()

        try:
            inc_backup_time = self.backup_with_stats(mode=True)
            total_backup_size = local.calc_backup_size(self.cluster_spec, rounded=False)
            inc_backup_size = round(total_backup_size - initial_backup_size, 2)
        finally:
            self.collectlogs()

        self._report_kpi(inc_backup_time, inc_backup_size)
Example #9
    def _report_kpi(self):
        edition = 'CE' if self.rest.is_community(self.master_node) else 'EE'
        backup_size = local.calc_backup_size(self.cluster_spec)

        self.reporter.post_to_sf(
            *self.metric_helper.calc_bnr_throughput(self.time_elapsed,
                                                    edition,
                                                    tool='backup')
        )

        self.reporter.post_to_sf(
            *self.metric_helper.calc_backup_size(backup_size, edition)
        )
Example #10
    def _report_kpi(self, *args):
        edition = 'CE' if self.rest.is_community(self.master_node) else 'EE'
        backup_size = local.calc_backup_size(self.cluster_spec)

        backing_store = self.test_config.backup_settings.storage_type

        tool = 'backup'
        storage = None
        if backing_store:
            storage = backing_store

        self.reporter.post(
            *self.metrics.backup_size(backup_size, edition, tool, storage))
Example #11
    def run(self):
        self.extract_tools()

        self.load()
        self.wait_for_persistence()
        self.backup()

        initial_backup_size = local.calc_backup_size(self.cluster_spec,
                                                     rounded=False)

        self.access()
        self.wait_for_persistence()

        # Define a secondary, incremental load. The item count is borrowed from
        # the 'creates' field, which the load phase doesn't otherwise use.
        inc_load = self.test_config.load_settings.creates
        workers = self.test_config.load_settings.workers
        size = self.test_config.load_settings.size

        # New key prefix needed to create incremental dataset.
        self.load(
            settings=LoadSettings({"items": inc_load, "workers": workers, "size": size}),
            target_iterator=TargetIterator(self.cluster_spec,
                                           self.test_config,
                                           prefix='inc-')
        )
        self.wait_for_persistence()

        inc_backup_time = self.backup_with_stats(mode=True)
        total_backup_size = local.calc_backup_size(self.cluster_spec,
                                                   rounded=False)
        inc_backup_size = round(total_backup_size - initial_backup_size, 2)

        self._report_kpi(inc_backup_time, inc_backup_size)
Example #12
    def _report_kpi(self, *args):
        edition = 'CE' if self.rest.is_community(self.master_node) else 'EE'
        backup_size = local.calc_backup_size(self.cluster_spec)

        self.reporter.post(*self.metrics.backup_size(backup_size, edition))