def __init__(self, *args):
    """Cache eventing settings, grant admin RBAC, and build the target iterator."""
    super().__init__(*args)

    eventing = self.test_config.eventing_settings
    self.functions = eventing.functions
    self.worker_count = eventing.worker_count
    self.cpp_worker_thread_count = eventing.cpp_worker_thread_count
    self.timer_worker_pool_size = eventing.timer_worker_pool_size
    self.worker_queue_cap = eventing.worker_queue_cap
    self.timer_timeout = eventing.timer_timeout
    self.timer_fuzz = eventing.timer_fuzz
    self.config_file = eventing.config_file
    self.request_url = eventing.request_url

    self.time = self.test_config.access_settings.time
    self.rebalance_settings = self.test_config.rebalance_settings

    # Every cluster master needs an admin RBAC user for the eventing REST calls.
    for master in self.cluster_spec.masters:
        self.rest.add_rbac_user(
            host=master,
            user="******",
            password="******",
            roles=['admin'],
        )

    self.target_iterator = TargetIterator(self.cluster_spec,
                                          self.test_config,
                                          "eventing")
def __init__(self, *args):
    """Cache GSI/cbindexperf settings and enable plasma stats collectors."""
    super().__init__(*args)

    gsi = self.test_config.gsi_settings
    self.configfile = gsi.cbindexperf_configfile
    self.configfiles = gsi.cbindexperf_configfiles.split(",")
    self.init_num_connections = gsi.init_num_connections
    self.step_num_connections = gsi.step_num_connections
    self.max_num_connections = gsi.max_num_connections
    self.run_recovery_test = gsi.run_recovery_test
    self.incremental_only = gsi.incremental_only
    self.incremental_load_iterations = gsi.incremental_load_iterations
    self.scan_time = gsi.scan_time
    self.storage = gsi.storage
    self.indexes = gsi.indexes

    self.secondary_statsfile = self.test_config.stats_settings.secondary_statsfile
    self.index_nodes = self.cluster_spec.servers_by_role('index')
    self.bucket = self.test_config.buckets[0]
    self.target_iterator = TargetIterator(self.cluster_spec,
                                          self.test_config,
                                          "gsi")

    # Plasma-backed indexes expose extra storage stats worth collecting.
    if self.storage == "plasma":
        self.COLLECTORS["secondary_storage_stats"] = True
        self.COLLECTORS["secondary_storage_stats_mm"] = True
def run(self):
    """Load both data-set halves, build indexes, then measure N1QL throughput.

    Half of the items are loaded under the 'n1ql' key prefix and used for
    queries; the other half is mutated by the background workload only.
    """
    load_settings = self.test_config.load_settings
    # BUG FIX: use integer division — `items / 2` produced a float item
    # count; sibling implementations of this split all use `//= 2`.
    load_settings.items //= 2

    iterator = TargetIterator(self.cluster_spec, self.test_config, 'n1ql')
    self.load(load_settings, iterator)
    self.load(load_settings)
    self.wait_for_persistence()
    self.compact_bucket()

    self.build_index()
    self._create_prepared_statements()

    self.workload = self.test_config.access_settings
    # Same halving for the access phase (integer division, as above).
    self.workload.items //= 2
    # Allow a subclass to override the query set; fall back to the settings.
    self.workload.n1ql_queries = getattr(self, 'n1ql_queries',
                                         self.workload.n1ql_queries)

    self.access_bg(self.workload)
    self.access(self.workload)

    if self.test_config.stats_settings.enabled:
        self.reporter.post_to_sf(
            *self.metric_helper.calc_avg_n1ql_queries())
def access(self, *args):
    """Run the query-only phase over the unmutated ('n1ql'-prefixed) half."""
    self.download_certificate()

    settings = self.test_config.access_settings
    settings.items //= 2  # queries target only the unmutated half
    settings.workers = 0  # query-only phase: no KV workers

    n1ql_iterator = TargetIterator(self.cluster_spec, self.test_config, 'n1ql')
    super().access(settings=settings, target_iterator=n1ql_iterator)
def access(self, *args):
    """Run the query-only phase with the N1QL document generator."""
    settings = self.test_config.access_settings
    settings.items //= 2  # only the unmutated half is queried
    settings.workers = 0  # no KV workers in this phase
    settings.buckets = self.test_config.buckets
    settings.doc_gen = self.test_config.access_settings.n1ql_gen

    n1ql_iterator = TargetIterator(self.cluster_spec, self.test_config, 'n1ql')
    super(N1QLTest, self).access(settings=settings,
                                 target_iterator=n1ql_iterator)
def __init__(self, *args, **kwargs):
    """Parse CLI options, build configs and cluster helpers.

    BUG FIX: the CLI leftovers returned by get_options() were bound to
    ``args``, shadowing this constructor's own ``*args`` — so the
    superclass received the CLI leftovers instead of the original
    constructor arguments. They are now kept in ``cli_args`` (the
    sibling implementation uses ``_args`` for the same reason).
    """
    options, cli_args = get_options()

    self.cluster_spec = ClusterSpec()
    self.cluster_spec.parse(options.cluster_spec_fname, cli_args)
    self.test_config = TestConfig()
    self.test_config.parse(options.test_config_fname, cli_args)

    self.target_iterator = TargetIterator(self.cluster_spec, self.test_config)
    self.memcached = MemcachedHelper(self.test_config)
    self.remote = RemoteHelper(self.cluster_spec, self.test_config)
    self.rest = RestHelper(self.cluster_spec)

    # Forward the original constructor arguments, not the CLI leftovers.
    super(FunctionalTest, self).__init__(*args, **kwargs)
def __init__(self, *args, **kwargs):
    """Parse CLI options and construct the functional-test helpers.

    Leftover CLI arguments are treated as comma-separated override
    specs; each spec is split on '.' before being handed to
    TestConfig.parse().
    """
    options, _args = get_options()
    # Falsy (the empty list) when no extra CLI args were given; otherwise
    # a lazy generator of the '.'-split override specs.
    override = \
        _args and (arg.split('.') for arg in ' '.join(_args).split(','))
    self.cluster_spec = ClusterSpec()
    self.cluster_spec.parse(options.cluster_spec_fname)
    self.test_config = TestConfig()
    self.test_config.parse(options.test_config_fname, override)

    self.target_iterator = TargetIterator(self.cluster_spec, self.test_config)
    # NOTE(review): sibling implementations construct MemcachedHelper from
    # the test config and pass the test config to RemoteHelper as well —
    # confirm these single-argument calls match the helpers' signatures.
    self.memcached = MemcachedHelper(self.cluster_spec)
    self.remote = RemoteHelper(self.cluster_spec)

    super(FunctionalTest, self).__init__(*args, **kwargs)
def __init__(self, *args):
    """Set up a cbindexperf-driven GSI test.

    Caches the GSI settings, optionally rewrites the cbindexperf config
    file in place with concurrency/repeat overrides, and toggles the
    stats collectors according to the storage engine and settings.
    """
    super().__init__(*args)
    self.configfile = self.test_config.gsi_settings.cbindexperf_configfile
    self.configfiles = self.test_config.gsi_settings.cbindexperf_configfiles.split(
        ",")
    self.run_recovery_test = self.test_config.gsi_settings.run_recovery_test
    self.incremental_only = self.test_config.gsi_settings.incremental_only
    self.incremental_load_iterations = self.test_config.gsi_settings.incremental_load_iterations
    self.scan_time = self.test_config.gsi_settings.scan_time
    self.report_initial_build_time = self.test_config.gsi_settings.report_initial_build_time
    self.storage = self.test_config.gsi_settings.storage
    self.indexes = self.test_config.gsi_settings.indexes
    self.bucket = self.test_config.buckets[0]
    self.target_iterator = TargetIterator(self.cluster_spec,
                                          self.test_config,
                                          "gsi")
    # Unpack the bundled Couchbase .deb package.
    extract_cb_deb(filename='couchbase.deb')
    self.cbindexperf_concurrency = self.test_config.gsi_settings.cbindexperf_concurrency
    self.cbindexperf_repeat = self.test_config.gsi_settings.cbindexperf_repeat
    # When both overrides are set, rewrite the config file in place:
    # top-level "Concurrency" plus a "Repeat" count on every scan spec.
    if self.cbindexperf_concurrency and self.cbindexperf_repeat:
        with open(self.configfile, 'r') as f:
            cbindexperf_contents = json.load(f)
        cbindexperf_contents["Concurrency"] = self.cbindexperf_concurrency
        for scan_spec in cbindexperf_contents["ScanSpecs"]:
            scan_spec["Repeat"] = self.cbindexperf_repeat
        with open(self.configfile, 'w') as f:
            json.dump(cbindexperf_contents, f)
    # Plasma-backed indexes expose additional storage stats collectors.
    if self.storage == "plasma":
        self.COLLECTORS["secondary_storage_stats"] = True
        self.COLLECTORS["secondary_storage_stats_mm"] = True
    # Per-index stats can be disabled via settings (overrides the plasma
    # toggle above for "secondary_storage_stats").
    if self.test_config.gsi_settings.disable_perindex_stats:
        self.COLLECTORS["secondary_debugstats_index"] = False
        self.COLLECTORS["secondary_storage_stats"] = False
    self.build = self.rest.get_version(self.master_node)
def __init__(self, *args):
    """Cache eventing settings, grant admin RBAC, and build the target iterator."""
    super().__init__(*args)
    self.functions = self.test_config.eventing_settings.functions
    self.worker_count = self.test_config.eventing_settings.worker_count
    self.cpp_worker_thread_count = self.test_config.eventing_settings.cpp_worker_thread_count
    self.function_nodes = self.cluster_spec.servers_by_role('eventing')
    for master in self.cluster_spec.masters:
        # BUG FIX: the user name was passed as ``bucket="eventing"``;
        # the sibling implementation shows add_rbac_user() takes
        # ``user=``/``password=``/``roles=`` keywords.
        self.rest.add_rbac_user(
            host=master,
            user="eventing",
            password="******",
            roles=['admin'],
        )
    self.target_iterator = TargetIterator(self.cluster_spec,
                                          self.test_config,
                                          "eventing")
def __init__(self, cluster_spec: ClusterSpec, test_config: TestConfig,
             verbose: bool):
    """Wire up every helper needed by a Sync Gateway performance test.

    Args:
        cluster_spec: parsed cluster specification (nodes, roles, creds).
        test_config: parsed test configuration.
        verbose: verbosity flag forwarded to the remote/monitoring helpers.
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.memcached = MemcachedHelper(test_config)
    self.remote = RemoteHelper(cluster_spec, test_config, verbose)
    self.rest = RestHelper(cluster_spec)
    # The build version is read from the first master via the SG REST API.
    self.master_node = next(cluster_spec.masters)
    self.build = self.rest.get_sgversion(self.master_node)
    self.metrics = MetricHelper(self)
    self.reporter = ShowFastReporter(cluster_spec, test_config, self.build)
    # Worker processes are only spun up when the test case needs them.
    if self.test_config.test_case.use_workers:
        self.worker_manager = WorkerManager(cluster_spec, test_config,
                                            verbose)
    self.settings = self.test_config.access_settings
    self.settings.syncgateway_settings = self.test_config.syncgateway_settings
    self.profiler = Profiler(cluster_spec, test_config)
    self.cluster = ClusterManager(cluster_spec, test_config)
    self.target_iterator = TargetIterator(cluster_spec, test_config)
    self.monitor = Monitor(cluster_spec, test_config, verbose)
def load(self, *args):
    """Populate two equally sized data sets under different key prefixes.

    N1QL tests have two conflicting needs: documents must keep mutating
    (so secondary indexes stay busy) yet remain deterministic (so query
    results can be predicted). The compromise: half of the items, loaded
    under the 'n1ql' prefix, are never mutated and serve the queries,
    while the other half is mutated freely and never queried.
    """
    settings = self.test_config.load_settings
    settings.items //= 2  # each half gets an equal share of the items

    n1ql_iterator = TargetIterator(self.cluster_spec, self.test_config, 'n1ql')
    super().load(settings=settings, target_iterator=n1ql_iterator)
    super().load(settings=settings)
def __init__(self, *args):
    """Cache GSI/cbindexperf settings and prepare the index workload."""
    super().__init__(*args)

    gsi = self.test_config.gsi_settings
    self.configfile = gsi.cbindexperf_configfile
    self.configfiles = gsi.cbindexperf_configfiles.split(",")
    self.run_recovery_test = gsi.run_recovery_test
    self.incremental_only = gsi.incremental_only
    self.incremental_load_iterations = gsi.incremental_load_iterations
    self.scan_time = gsi.scan_time
    self.report_initial_build_time = gsi.report_initial_build_time
    self.storage = gsi.storage
    self.indexes = gsi.indexes

    self.bucket = self.test_config.buckets[0]
    self.target_iterator = TargetIterator(self.cluster_spec,
                                          self.test_config,
                                          "gsi")

    # Unpack the bundled Couchbase .deb package.
    extract_cb_deb(filename='couchbase.deb')

    # Plasma-backed indexes expose extra storage stats collectors.
    if self.storage == "plasma":
        self.COLLECTORS["secondary_storage_stats"] = True
        self.COLLECTORS["secondary_storage_stats_mm"] = True
def __init__(self, *args, **kwargs):
    """Enable N1QL and target every bucket with an empty key prefix."""
    super(TuqTest, self).__init__(*args, **kwargs)
    self.n1ql = True
    self.target_iterator = TargetIterator(self.cluster_spec,
                                          self.test_config,
                                          prefix='')
def xattr_load(self, *args, **kwargs):
    """Load XATTRs into both halves: default targets first, then 'n1ql'."""
    n1ql_iterator = TargetIterator(self.cluster_spec, self.test_config, 'n1ql')
    super().xattr_load()
    super().xattr_load(target_iterator=n1ql_iterator)
def wait_until_warmed_up(self):
    """Block until every (node, bucket) pair has finished warmup."""
    for target in TargetIterator(self.cluster_spec, self.test_config):
        # NOTE(review): target.node is passed as-is; a sibling variant
        # strips a ':port' suffix first — confirm the node format here.
        self.monitor.monitor_warmup(self.memcached, target.node,
                                    target.bucket)
def wait_until_warmed_up(self):
    """Block until every (host, bucket) pair has finished warmup."""
    for target in TargetIterator(self.cluster_spec, self.test_config):
        host, _, _ = target.node.partition(':')  # drop the port, if any
        self.monitor.monitor_warmup(self.memcached, host, target.bucket)
def __init__(self, *args, **kwargs):
    """Initialize the test with a 'symmetric' target key prefix."""
    super().__init__(*args, **kwargs)
    self.target_iterator = TargetIterator(self.cluster_spec,
                                          self.test_config,
                                          prefix='symmetric')