class ClusterManager(object):
    """Orchestrate initial setup and tuning of a Couchbase cluster.

    Thin delegation layer: every method forwards work to one of the
    helper facades created in ``__init__`` -- REST API calls via
    ``self.rest``, remote shell commands via ``self.remote``, polling via
    ``self.monitor``, and direct memcached access via ``self.memcached``.
    Methods are intended to be invoked once each during test setup.
    """

    def __init__(self, cluster_spec, test_config, verbose):
        self.cluster_spec = cluster_spec
        self.test_config = test_config

        # Control-plane helpers (REST, SSH, polling, memcached protocol).
        self.rest = RestHelper(cluster_spec)
        self.remote = RemoteHelper(cluster_spec, test_config, verbose)
        self.monitor = Monitor(cluster_spec)
        self.memcached = MemcachedHelper(test_config)

        # NOTE(review): yield_clusters() is CALLED here, so self.clusters
        # is a single-use iterator (consumed by add_nodes), whereas
        # yield_servers / yield_masters are stored un-called and
        # re-invoked on every use. add_nodes() is therefore only safe to
        # call once per instance -- confirm this is intentional.
        self.clusters = cluster_spec.yield_clusters()
        self.servers = cluster_spec.yield_servers
        self.masters = cluster_spec.yield_masters

        self.initial_nodes = test_config.cluster.initial_nodes
        self.mem_quota = test_config.cluster.mem_quota
        # Fall back to a single server group when none is configured.
        self.group_number = test_config.cluster.group_number or 1

    def set_data_path(self):
        """Set data/index paths on every node, if paths are configured."""
        if self.cluster_spec.paths:
            data_path, index_path = self.cluster_spec.paths
            for server in self.servers():
                self.rest.set_data_path(server, data_path, index_path)

    def set_auth(self):
        """Initialize REST credentials on every node."""
        for server in self.servers():
            self.rest.set_auth(server)

    def set_mem_quota(self):
        """Apply the configured memory quota to every node."""
        for server in self.servers():
            self.rest.set_mem_quota(server, self.mem_quota)

    def disable_moxi(self):
        """Disable moxi when the setting is present in the test config."""
        if self.test_config.cluster.disable_moxi is not None:
            self.remote.disable_moxi()

    def create_server_groups(self):
        """Create server groups 2..group_number on every cluster.

        Starts at index 1 because 'Group 1' already exists by default.
        """
        for master in self.masters():
            for i in range(1, self.group_number):
                name = 'Group {}'.format(i + 1)
                self.rest.create_server_group(master, name=name)

    def add_nodes(self):
        """Add the initial nodes to each cluster and rebalance.

        Consumes ``self.clusters`` (an iterator -- see note in __init__).
        """
        for (_, servers), initial_nodes in zip(self.clusters,
                                               self.initial_nodes):
            if initial_nodes < 2:  # Single-node cluster
                continue

            # Adding initial nodes: the first server acts as the master
            # the others are joined to.
            master = servers[0]
            if self.group_number > 1:
                groups = self.rest.get_server_groups(master)
            else:
                groups = {}
            for i, host_port in enumerate(servers[1:initial_nodes], start=1):
                host = host_port.split(':')[0]
                # Map node index -> server-group URI (None when ungrouped).
                uri = groups.get(
                    server_group(servers[:initial_nodes],
                                 self.group_number, i))
                self.rest.add_node(master, host, uri)

            # Rebalance the newly formed cluster and block until done.
            master = servers[0]
            known_nodes = servers[:initial_nodes]
            ejected_nodes = []
            self.rest.rebalance(master, known_nodes, ejected_nodes)
            self.monitor.monitor_rebalance(master)

    def create_buckets(self):
        """Create every configured bucket on every cluster.

        The node memory quota is split evenly across the buckets.
        """
        ram_quota = self.mem_quota / self.test_config.cluster.num_buckets
        replica_number = self.test_config.bucket.replica_number
        replica_index = self.test_config.bucket.replica_index
        eviction_policy = self.test_config.bucket.eviction_policy
        threads_number = self.test_config.bucket.threads_number
        password = self.test_config.bucket.password

        for master in self.masters():
            for bucket_name in self.test_config.buckets:
                self.rest.create_bucket(host_port=master,
                                        name=bucket_name,
                                        ram_quota=ram_quota,
                                        replica_number=replica_number,
                                        replica_index=replica_index,
                                        eviction_policy=eviction_policy,
                                        threads_number=threads_number,
                                        password=password)

    def configure_auto_compaction(self):
        """Push auto-compaction settings to every cluster master."""
        compaction_settings = self.test_config.compaction
        for master in self.masters():
            self.rest.configure_auto_compaction(master, compaction_settings)

    def configure_internal_settings(self):
        """Apply ns_server internal settings, one parameter at a time."""
        internal_settings = self.test_config.internal_settings
        for master in self.masters():
            for parameter, value in internal_settings.items():
                # Values arrive as strings from the config; the REST API
                # gets them as integers.
                self.rest.set_internal_settings(master,
                                                {parameter: int(value)})

    def tweak_memory(self):
        """OS-level memory tuning on all nodes (swap, caches, THP)."""
        self.remote.reset_swap()
        self.remote.drop_caches()
        self.remote.set_swappiness()
        self.remote.disable_thp()

    def restart_with_alternative_num_vbuckets(self):
        """Override the default vbucket count via diag/eval, if configured."""
        num_vbuckets = self.test_config.cluster.num_vbuckets
        if num_vbuckets is not None:
            erl_cmd = \
                'ns_config:set(couchbase_num_vbuckets_default, {}).'.format(
                    num_vbuckets)
            logger.info('Changing couchbase_num_vbuckets_default to {}'.format(
                num_vbuckets))
            for master in self.masters():
                self.rest.run_diag_eval(master, erl_cmd)

    def restart_with_alternative_bucket_options(self):
        """Override low-level bucket options via diag/eval, then restart.

        Options left at -1 in the config are skipped (treated as unset).
        """
        cmd = 'ns_bucket:update_bucket_props("{}", ' \
            '[{{extra_config_string, "{}={}"}}]).'
        for option in ('defragmenter_enabled', 'exp_pager_stime', 'ht_locks',
                       'max_num_shards', 'max_threads',
                       'warmup_min_memory_threshold'):
            value = getattr(self.test_config.bucket, option)
            if value != -1:
                logger.info('Changing {} to {}'.format(option, value))
                for master in self.masters():
                    for bucket in self.test_config.buckets:
                        diag_eval = cmd.format(bucket, option, value)
                        self.rest.run_diag_eval(master, diag_eval)
        self.remote.restart()

    def tune_logging(self):
        """Adjust log rotation on all nodes and restart the service."""
        self.remote.tune_log_rotation()
        self.remote.restart()

    def restart_with_alternative_num_cpus(self):
        """Restart nodes pinned to a reduced CPU count, if configured."""
        num_cpus = self.test_config.cluster.num_cpus
        if num_cpus:
            self.remote.restart_with_alternative_num_cpus(num_cpus)

    def restart_with_alternative_jemalloc_conf(self):
        """Restart nodes with an alternative jemalloc configuration."""
        self.remote.restart_with_jemalloc_conf()

    def configure_tcmalloc_aggressive_decommit(self):
        """Toggle tcmalloc aggressive decommit; -1 means leave unset."""
        value = self.test_config.cluster.tcmalloc_aggressive_decommit
        if value != -1:
            self.remote.configure_tcmalloc_aggressive_decommit(value)

    def restart_with_sfwi(self):
        """Restart with SFWI enabled when requested by the test config."""
        if self.test_config.cluster.sfwi:
            self.remote.restart_with_sfwi()

    def enable_auto_failover(self):
        """Enable auto-failover on every cluster."""
        for master in self.masters():
            self.rest.enable_auto_failover(master)

    def wait_until_warmed_up(self):
        """Block until every target bucket finishes warmup."""
        target_iterator = TargetIterator(self.cluster_spec, self.test_config)
        for target in target_iterator:
            self.monitor.monitor_warmup(self.memcached, target.node,
                                        target.bucket)

    def wait_until_healthy(self):
        """Block until every cluster master reports healthy."""
        for master in self.cluster_spec.yield_masters():
            self.monitor.monitor_node_health(master)

    def change_watermarks(self):
        """Set memory watermarks on each node's buckets via the flusher.

        NOTE(review): zipping servers against self.initial_nodes truncates
        the iteration to the shorter sequence, and the second element is
        unused -- confirm that is intentional.
        """
        watermark_settings = self.test_config.watermark_settings
        for host_port, initial_nodes in zip(self.servers(),
                                            self.initial_nodes):
            host = host_port.split(':')[0]
            memcached_port = self.rest.get_memcached_port(host_port)
            for bucket in self.test_config.buckets:
                for key, val in watermark_settings.items():
                    # Watermarks are expressed relative to the mem quota.
                    val = self.memcached.calc_watermark(val, self.mem_quota)
                    self.memcached.set_flusher_param(host, memcached_port,
                                                     bucket, key, val)

    def start_cbq_engine(self):
        """Start the cbq query engine when the test config requests it."""
        if self.test_config.cluster.run_cbq:
            self.remote.start_cbq()
class ClusterManager(object):
    """Configure, populate and tune a Couchbase cluster before a test.

    Every public method delegates its work to one of the helper facades
    built in ``__init__``: REST calls (``self.rest``), remote shell
    commands (``self.remote``), polling (``self.monitor``) and direct
    memcached access (``self.memcached``).
    """

    def __init__(self, cluster_spec, test_config, verbose):
        self.cluster_spec = cluster_spec
        self.test_config = test_config

        # Clusters is a one-shot iterator; servers/masters stay callable.
        self.clusters = cluster_spec.yield_clusters()
        self.servers = cluster_spec.yield_servers
        self.masters = cluster_spec.yield_masters

        self.initial_nodes = test_config.cluster.initial_nodes
        self.mem_quota = test_config.cluster.mem_quota
        self.group_number = test_config.cluster.group_number or 1

        # Control-plane helpers.
        self.rest = RestHelper(cluster_spec)
        self.remote = RemoteHelper(cluster_spec, test_config, verbose)
        self.monitor = Monitor(cluster_spec)
        self.memcached = MemcachedHelper(test_config)

    def set_data_path(self):
        """Point every node at the configured data/index directories."""
        if not self.cluster_spec.paths:
            return
        data_path, index_path = self.cluster_spec.paths
        for node in self.servers():
            self.rest.set_data_path(node, data_path, index_path)

    def set_auth(self):
        """Initialize REST credentials on each node."""
        for node in self.servers():
            self.rest.set_auth(node)

    def set_mem_quota(self):
        """Apply the configured memory quota on each node."""
        for node in self.servers():
            self.rest.set_mem_quota(node, self.mem_quota)

    def disable_moxi(self):
        """Disable moxi when the setting is present in the test config."""
        if self.test_config.cluster.disable_moxi is not None:
            self.remote.disable_moxi()

    def create_server_groups(self):
        """Create groups 2..group_number ('Group 1' exists by default)."""
        for master in self.masters():
            for group_idx in range(2, self.group_number + 1):
                self.rest.create_server_group(
                    master, name='Group {}'.format(group_idx))

    def add_nodes(self):
        """Join the initial nodes into each cluster, then rebalance."""
        for (_, nodes), size in zip(self.clusters, self.initial_nodes):
            if size < 2:
                continue  # single-node cluster: nothing to join

            master = nodes[0]
            groups = (self.rest.get_server_groups(master)
                      if self.group_number > 1 else {})

            members = nodes[:size]
            for idx, host_port in enumerate(nodes[1:size], start=1):
                # Resolve the node's server-group URI (None if ungrouped).
                group_uri = groups.get(
                    server_group(members, self.group_number, idx))
                self.rest.add_node(master, host_port.split(':')[0],
                                   group_uri)

            # Kick off the rebalance and block until it completes.
            self.rest.rebalance(master, members, [])
            self.monitor.monitor_rebalance(master)

    def create_buckets(self):
        """Create every configured bucket on every cluster."""
        bucket_cfg = self.test_config.bucket
        # Split the node quota evenly across all buckets.
        settings = dict(
            ram_quota=self.mem_quota / self.test_config.cluster.num_buckets,
            replica_number=bucket_cfg.replica_number,
            replica_index=bucket_cfg.replica_index,
            eviction_policy=bucket_cfg.eviction_policy,
            threads_number=bucket_cfg.threads_number,
            password=bucket_cfg.password,
        )
        for master in self.masters():
            for bucket_name in self.test_config.buckets:
                self.rest.create_bucket(host_port=master, name=bucket_name,
                                        **settings)

    def configure_auto_compaction(self):
        """Push auto-compaction settings to every cluster master."""
        for master in self.masters():
            self.rest.configure_auto_compaction(master,
                                                self.test_config.compaction)

    def configure_internal_settings(self):
        """Apply ns_server internal settings, one parameter at a time."""
        for master in self.masters():
            for name, value in self.test_config.internal_settings.items():
                # Config values are strings; the REST API wants integers.
                self.rest.set_internal_settings(master, {name: int(value)})

    def tweak_memory(self):
        """OS-level memory tuning on all nodes (swap, caches, THP)."""
        self.remote.reset_swap()
        self.remote.drop_caches()
        self.remote.set_swappiness()
        self.remote.disable_thp()

    def restart_with_alternative_num_vbuckets(self):
        """Override the default vbucket count via diag/eval, if set."""
        num_vbuckets = self.test_config.cluster.num_vbuckets
        if num_vbuckets is None:
            return
        erl_cmd = 'ns_config:set(couchbase_num_vbuckets_default, {}).'.format(
            num_vbuckets)
        logger.info('Changing couchbase_num_vbuckets_default to {}'.format(
            num_vbuckets))
        for master in self.masters():
            self.rest.run_diag_eval(master, erl_cmd)

    def restart_with_alternative_bucket_options(self):
        """Override low-level bucket options via diag/eval, then restart."""
        cmd = 'ns_bucket:update_bucket_props("{}", ' \
            '[{{extra_config_string, "{}={}"}}]).'
        tunables = ('defragmenter_enabled', 'exp_pager_stime', 'ht_locks',
                    'max_num_shards', 'max_threads',
                    'warmup_min_memory_threshold')
        for opt in tunables:
            val = getattr(self.test_config.bucket, opt)
            if val == -1:
                continue  # -1 is the "not overridden" sentinel
            logger.info('Changing {} to {}'.format(opt, val))
            for master in self.masters():
                for bucket in self.test_config.buckets:
                    self.rest.run_diag_eval(master,
                                            cmd.format(bucket, opt, val))
        self.remote.restart()

    def tune_logging(self):
        """Adjust log rotation on all nodes and restart the service."""
        self.remote.tune_log_rotation()
        self.remote.restart()

    def restart_with_alternative_num_cpus(self):
        """Restart nodes pinned to a reduced CPU count, if configured."""
        num_cpus = self.test_config.cluster.num_cpus
        if num_cpus:
            self.remote.restart_with_alternative_num_cpus(num_cpus)

    def restart_with_alternative_jemalloc_conf(self):
        """Restart nodes with an alternative jemalloc configuration."""
        self.remote.restart_with_jemalloc_conf()

    def configure_tcmalloc_aggressive_decommit(self):
        """Toggle tcmalloc aggressive decommit; -1 means leave unset."""
        decommit = self.test_config.cluster.tcmalloc_aggressive_decommit
        if decommit != -1:
            self.remote.configure_tcmalloc_aggressive_decommit(decommit)

    def restart_with_sfwi(self):
        """Restart with SFWI enabled when requested by the test config."""
        if self.test_config.cluster.sfwi:
            self.remote.restart_with_sfwi()

    def enable_auto_failover(self):
        """Enable auto-failover on every cluster."""
        for master in self.masters():
            self.rest.enable_auto_failover(master)

    def wait_until_warmed_up(self):
        """Block until every target bucket finishes warmup."""
        for target in TargetIterator(self.cluster_spec, self.test_config):
            self.monitor.monitor_warmup(self.memcached, target.node,
                                        target.bucket)

    def wait_until_healthy(self):
        """Block until every cluster master reports healthy."""
        for master in self.cluster_spec.yield_masters():
            self.monitor.monitor_node_health(master)

    def change_watermarks(self):
        """Set memory watermarks on each node's buckets via the flusher."""
        settings = self.test_config.watermark_settings
        for host_port, _ in zip(self.servers(), self.initial_nodes):
            host = host_port.split(':')[0]
            port = self.rest.get_memcached_port(host_port)
            for bucket in self.test_config.buckets:
                for param, ratio in settings.items():
                    # Watermarks are relative to the memory quota.
                    value = self.memcached.calc_watermark(ratio,
                                                          self.mem_quota)
                    self.memcached.set_flusher_param(host, port, bucket,
                                                     param, value)

    def start_cbq_engine(self):
        """Start the cbq query engine when the test config requests it."""
        if self.test_config.cluster.run_cbq:
            self.remote.start_cbq()