def __populate_cluster_info(self, cluster_id, servers, cluster_srv,
                            cluster_name, service_config):
    nodes = list()
    for server in servers:
        temp_server = TestInputServer()
        temp_server.ip = server.get("hostname")
        temp_server.hostname = server.get("hostname")
        temp_server.services = server.get("services")
        temp_server.port = "18091"
        temp_server.rest_username = self.rest_username
        temp_server.rest_password = self.rest_password
        temp_server.hosted_on_cloud = True
        temp_server.memcached_port = "11207"
        nodes.append(temp_server)

    cluster = CBCluster(username=self.rest_username,
                        password=self.rest_password,
                        servers=[None] * 40)
    cluster.id = cluster_id
    cluster.srv = cluster_srv
    cluster.cluster_config = service_config
    cluster.pod = self.pod
    cluster.tenant = self.tenant

    for temp_server in nodes:
        if "Data" in temp_server.services:
            cluster.kv_nodes.append(temp_server)
        if "Query" in temp_server.services:
            cluster.query_nodes.append(temp_server)
        if "Index" in temp_server.services:
            cluster.index_nodes.append(temp_server)
        if "Eventing" in temp_server.services:
            cluster.eventing_nodes.append(temp_server)
        if "Analytics" in temp_server.services:
            cluster.cbas_nodes.append(temp_server)
        if "FTS" in temp_server.services:
            cluster.fts_nodes.append(temp_server)

    cluster.master = cluster.kv_nodes[0]
    self.tenant.clusters.update({cluster.id: cluster})
    self.cb_clusters[cluster_name] = cluster
    self.cb_clusters[cluster_name].cloud_cluster = True
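# Illustrative only: a minimal sketch of the `servers` payload this helper
# expects (a list of dicts from the cloud API) and how nodes get routed
# into the per-service lists. The hostnames below are made up; the service
# names must match the capitalized strings checked above.
#
#   servers = [
#       {"hostname": "node1.example.com", "services": ["Data", "Query"]},
#       {"hostname": "node2.example.com", "services": ["Index", "FTS"]},
#   ]
#
# With this input, node1 lands in cluster.kv_nodes and cluster.query_nodes,
# node2 in cluster.index_nodes and cluster.fts_nodes, and node1 becomes
# cluster.master since it is the first KV node.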
def setUp(self):
    self.input = TestInputSingleton.input

    # Framework specific parameters
    self.log_level = self.input.param("log_level", "info").upper()
    self.infra_log_level = self.input.param("infra_log_level",
                                            "info").upper()
    self.skip_setup_cleanup = self.input.param("skip_setup_cleanup", False)
    self.tear_down_while_setup = self.input.param("tear_down_while_setup",
                                                  True)
    self.test_timeout = self.input.param("test_timeout", 3600)
    self.thread_to_use = self.input.param("threads_to_use", 10)
    self.case_number = self.input.param("case_number", 0)
    # End of framework parameters

    # Cluster level info settings
    self.log_info = self.input.param("log_info", None)
    self.log_location = self.input.param("log_location", None)
    self.stat_info = self.input.param("stat_info", None)
    self.port = self.input.param("port", None)
    self.port_info = self.input.param("port_info", None)
    self.servers = self.input.servers
    self.__cb_clusters = []
    self.num_servers = self.input.param("servers", len(self.servers))
    self.primary_index_created = False
    self.index_quota_percent = self.input.param("index_quota_percent", None)
    self.gsi_type = self.input.param("gsi_type", 'plasma')
    # CBAS setting
    self.jre_path = self.input.param("jre_path", None)
    # End of cluster info parameters

    # Bucket specific params
    self.bucket_type = self.input.param("bucket_type",
                                        Bucket.bucket_type.MEMBASE)
    self.bucket_size = self.input.param("bucket_size", None)
    self.bucket_lww = self.input.param("lww", True)
    self.standard_buckets = self.input.param("standard_buckets", 1)
    if self.standard_buckets > 10:
        self.bucket_util.change_max_buckets(self.standard_buckets)
    self.vbuckets = self.input.param("vbuckets", 1024)
    self.num_replicas = self.input.param("replicas", 1)
    self.active_resident_threshold = int(
        self.input.param("active_resident_threshold", 100))
    self.compression_mode = self.input.param("compression_mode", 'passive')
    # End of bucket parameters

    # Doc specific params
    self.key_size = self.input.param("key_size", 0)
    self.doc_size = self.input.param("doc_size", 10)
    self.sub_doc_size = self.input.param("sub_doc_size", 10)
    self.doc_type = self.input.param("doc_type", "json")
    self.num_items = self.input.param("num_items", 100000)
    self.target_vbucket = self.input.param("target_vbucket", None)
    self.maxttl = self.input.param("maxttl", 0)
    # End of doc specific parameters

    # Transactions parameters
    self.transaction_timeout = self.input.param("transaction_timeout", 100)
    self.transaction_commit = self.input.param("transaction_commit", True)
    self.update_count = self.input.param("update_count", 1)
    self.sync = self.input.param("sync", True)
    self.default_bucket = self.input.param("default_bucket", True)
    self.num_buckets = self.input.param("num_buckets", 0)
    self.atomicity = self.input.param("atomicity", False)
    # End of transaction parameters

    # Client specific params
    self.sdk_client_type = self.input.param("sdk_client_type", "java")
    self.sdk_compression = self.input.param("sdk_compression", True)
    self.replicate_to = self.input.param("replicate_to", 0)
    self.persist_to = self.input.param("persist_to", 0)
    self.sdk_retries = self.input.param("sdk_retries", 5)
    self.sdk_timeout = self.input.param("sdk_timeout", 5)
    self.durability_level = self.input.param("durability", "")

    # Doc Loader Params
    self.process_concurrency = self.input.param("process_concurrency", 8)
    self.batch_size = self.input.param("batch_size", 20)
    self.ryow = self.input.param("ryow", False)
    self.check_persistence = self.input.param("check_persistence", False)
    # End of client specific parameters

    # Initial number of items in the cluster
    self.services_init = self.input.param("services_init", None)
    self.nodes_init = self.input.param("nodes_init", 1)
    self.nodes_in = self.input.param("nodes_in", 1)
    self.nodes_out = self.input.param("nodes_out", 1)
    self.services_in = self.input.param("services_in", None)
    self.forceEject = self.input.param("forceEject", False)
    self.wait_timeout = self.input.param("wait_timeout", 60)
    self.dgm_run = self.input.param("dgm_run", False)
    self.verify_unacked_bytes = self.input.param("verify_unacked_bytes",
                                                 False)
    self.disabled_consistent_view = self.input.param(
        "disabled_consistent_view", None)
    self.rebalanceIndexWaitingDisabled = self.input.param(
        "rebalanceIndexWaitingDisabled", None)
    self.rebalanceIndexPausingDisabled = self.input.param(
        "rebalanceIndexPausingDisabled", None)
    self.maxParallelIndexers = self.input.param("maxParallelIndexers", None)
    self.maxParallelReplicaIndexers = self.input.param(
        "maxParallelReplicaIndexers", None)
    self.quota_percent = self.input.param("quota_percent", None)
    if not hasattr(self, 'skip_buckets_handle'):
        self.skip_buckets_handle = self.input.param(
            "skip_buckets_handle", False)

    # Initiate logging variables
    self.log = logging.getLogger("test")
    self.infra_log = logging.getLogger("infra")

    # Configure loggers
    self.log.setLevel(self.log_level)
    self.infra_log.setLevel(self.infra_log_level)

    # Support lib objects for testcase execution
    self.task_manager = TaskManager(self.thread_to_use)
    self.task = ServerTasks(self.task_manager)
    # End of library object creation

    self.cleanup = False
    self.nonroot = False
    self.test_failure = None

    self.__log_setup_status("started")
    if len(self.input.clusters) > 1:
        # Multi cluster setup
        counter = 1
        for _, nodes in self.input.clusters.iteritems():
            self.__cb_clusters.append(
                CBCluster(name="C%s" % counter, servers=nodes))
            counter += 1
    else:
        # Single cluster
        self.cluster = CBCluster(servers=self.servers)
        self.__cb_clusters.append(self.cluster)

    self.cluster_util = ClusterUtils(self.cluster, self.task_manager)
    self.bucket_util = BucketUtils(self.cluster, self.cluster_util,
                                   self.task)

    for cluster in self.__cb_clusters:
        shell = RemoteMachineShellConnection(cluster.master)
        self.os_info = shell.extract_remote_info().type.lower()
        if self.os_info != 'windows':
            if cluster.master.ssh_username != "root":
                self.nonroot = True
                shell.disconnect()
                break
        shell.disconnect()

    """ Some tests need to bypass checking the cb server at setup
        to run installation """
    self.skip_init_check_cbserver = self.input.param(
        "skip_init_check_cbserver", False)

    try:
        if self.skip_setup_cleanup:
            self.buckets = self.bucket_util.get_all_buckets()
            return
        if not self.skip_init_check_cbserver:
            for cluster in self.__cb_clusters:
                self.cb_version = None
                if RestHelper(RestConnection(
                        cluster.master)).is_ns_server_running():
                    """
                    Every new couchbase version brings new features, so
                    test code may not work on a previous release. We need
                    the couchbase version to filter out those tests.
                    """
                    self.cb_version = RestConnection(
                        cluster.master).get_nodes_version()
                else:
                    self.log.debug("couchbase server does not run yet")
                # TAP protocol is unsupported since 3.x,
                # and 3.x support itself has also stopped
                self.protocol = "dcp"
        self.services_map = None

        self.__log_setup_status("started")
        for cluster in self.__cb_clusters:
            if not self.skip_buckets_handle \
                    and not self.skip_init_check_cbserver:
                self.log.debug("Cleaning up cluster")
                cluster_util = ClusterUtils(cluster, self.task_manager)
                bucket_util = BucketUtils(cluster, cluster_util, self.task)
                cluster_util.cluster_cleanup(bucket_util)

        # Avoid any cluster operations in setup for new upgrade
        # & upgradeXDCR tests
        if str(self.__class__).find('newupgradetests') != -1 or \
                str(self.__class__).find('upgradeXDCR') != -1 or \
                str(self.__class__).find('Upgrade_EpTests') != -1 or \
                hasattr(self, 'skip_buckets_handle') and \
                self.skip_buckets_handle:
            self.log.warning("Any cluster operation in setup "
                             "will be skipped")
            self.primary_index_created = True
            self.__log_setup_status("finished")
            return

        # Avoid clean up if the previous test has been torn down
        if self.case_number == 1 or self.case_number > 1000:
            if self.case_number > 1000:
                self.log.warn("TearDown for previous test failed. "
                              "Will retry..")
                self.case_number -= 1000
            self.cleanup = True
            if not self.skip_init_check_cbserver:
                self.tearDownEverything()
                self.tear_down_while_setup = False

        if not self.skip_init_check_cbserver:
            for cluster in self.__cb_clusters:
                self.log.info("Initializing cluster")
                cluster_util = ClusterUtils(cluster, self.task_manager)
                # cluster_util.reset_cluster()
                master_services = cluster_util.get_services(
                    cluster.servers[:1], self.services_init, start_node=0)
                if master_services is not None:
                    master_services = master_services[0].split(",")
                self.quota = self._initialize_nodes(
                    self.task, cluster,
                    self.disabled_consistent_view,
                    self.rebalanceIndexWaitingDisabled,
                    self.rebalanceIndexPausingDisabled,
                    self.maxParallelIndexers,
                    self.maxParallelReplicaIndexers,
                    self.port, self.quota_percent,
                    services=master_services)
                cluster_util.change_env_variables()
                cluster_util.change_checkpoint_params()
                # cluster_util.add_all_nodes_then_rebalance(
                #     cluster.servers[1:])
                self.log.info("{0} initialized".format(cluster))
        else:
            self.quota = ""

        for cluster in self.__cb_clusters:
            cluster_util = ClusterUtils(cluster, self.task_manager)
            if self.log_info:
                cluster_util.change_log_info()
            if self.log_location:
                cluster_util.change_log_location()
            if self.stat_info:
                cluster_util.change_stat_info()
            if self.port_info:
                cluster_util.change_port_info()
            if self.port:
                self.port = str(self.port)

        self.__log_setup_status("finished")
        if not self.skip_init_check_cbserver:
            self.__log("started")
            self.sleep(5)
    except Exception as e:
        traceback.print_exc()
        self.task.shutdown(force=True)
        self.fail(e)
def setUp(self):
    self.input = TestInputSingleton.input

    # Framework specific parameters
    self.log_level = self.input.param("log_level", "info").upper()
    self.infra_log_level = self.input.param("infra_log_level",
                                            "error").upper()
    self.skip_setup_cleanup = self.input.param("skip_setup_cleanup", False)
    self.tear_down_while_setup = self.input.param("tear_down_while_setup",
                                                  True)
    self.test_timeout = self.input.param("test_timeout", 3600)
    self.thread_to_use = self.input.param("threads_to_use", 30)
    self.case_number = self.input.param("case_number", 0)
    # End of framework parameters

    # Cluster level info settings
    self.log_info = self.input.param("log_info", None)
    self.log_location = self.input.param("log_location", None)
    self.stat_info = self.input.param("stat_info", None)
    self.port = self.input.param("port", None)
    self.port_info = self.input.param("port_info", None)
    self.servers = self.input.servers
    self.cb_clusters = OrderedDict()
    self.num_servers = self.input.param("servers", len(self.servers))
    self.primary_index_created = False
    self.index_quota_percent = self.input.param("index_quota_percent", None)
    self.gsi_type = self.input.param("gsi_type", 'plasma')
    # CBAS setting
    self.jre_path = self.input.param("jre_path", None)
    self.enable_dp = self.input.param("enable_dp", False)
    # End of cluster info parameters

    # Bucket specific params
    self.bucket_type = self.input.param("bucket_type", Bucket.Type.MEMBASE)
    self.bucket_ttl = self.input.param("bucket_ttl", 0)
    self.bucket_size = self.input.param("bucket_size", None)
    self.bucket_conflict_resolution_type = \
        self.input.param("bucket_conflict_resolution",
                         Bucket.ConflictResolution.SEQ_NO)
    self.bucket_replica_index = self.input.param("bucket_replica_index", 1)
    self.bucket_eviction_policy = \
        self.input.param("bucket_eviction_policy",
                         Bucket.EvictionPolicy.VALUE_ONLY)
    self.flush_enabled = self.input.param("flushEnabled",
                                          Bucket.FlushBucket.DISABLED)
    self.bucket_time_sync = self.input.param("bucket_time_sync", False)
    self.standard_buckets = self.input.param("standard_buckets", 1)
    self.num_replicas = self.input.param("replicas", Bucket.ReplicaNum.ONE)
    self.active_resident_threshold = \
        int(self.input.param("active_resident_threshold", 100))
    self.compression_mode = \
        self.input.param("compression_mode", Bucket.CompressionMode.PASSIVE)
    self.bucket_storage = \
        self.input.param("bucket_storage", Bucket.StorageBackend.couchstore)
    if self.bucket_storage == Bucket.StorageBackend.magma:
        self.bucket_eviction_policy = Bucket.EvictionPolicy.FULL_EVICTION
    self.scope_name = self.input.param("scope", CbServer.default_scope)
    self.collection_name = self.input.param("collection",
                                            CbServer.default_collection)
    self.bucket_durability_level = self.input.param(
        "bucket_durability", Bucket.DurabilityLevel.NONE).upper()
    self.bucket_purge_interval = self.input.param("bucket_purge_interval",
                                                  1)
    self.bucket_durability_level = \
        BucketDurability[self.bucket_durability_level]
    # End of bucket parameters

    # Doc specific params
    self.key = self.input.param("key", "test_docs")
    self.key_size = self.input.param("key_size", 8)
    self.doc_size = self.input.param("doc_size", 256)
    self.sub_doc_size = self.input.param("sub_doc_size", 10)
    self.doc_type = self.input.param("doc_type", "json")
    self.num_items = self.input.param("num_items", 100000)
    self.target_vbucket = self.input.param("target_vbucket", None)
    self.maxttl = self.input.param("maxttl", 0)
    self.random_exp = self.input.param("random_exp", False)
    self.randomize_doc_size = self.input.param("randomize_doc_size", False)
    self.randomize_value = self.input.param("randomize_value", False)
    self.rev_write = self.input.param("rev_write", False)
    self.rev_read = self.input.param("rev_read", False)
    self.rev_update = self.input.param("rev_update", False)
    self.rev_del = self.input.param("rev_del", False)
    self.random_key = self.input.param("random_key", False)
    self.mix_key_size = self.input.param("mix_key_size", False)
    # End of doc specific parameters

    # Transactions parameters
    self.transaction_timeout = self.input.param("transaction_timeout", 100)
    self.transaction_commit = self.input.param("transaction_commit", True)
    self.update_count = self.input.param("update_count", 1)
    self.sync = self.input.param("sync", True)
    self.default_bucket = self.input.param("default_bucket", True)
    self.num_buckets = self.input.param("num_buckets", 0)
    self.atomicity = self.input.param("atomicity", False)
    self.defer = self.input.param("defer", False)
    # End of transaction parameters

    # Client specific params
    self.sdk_client_type = self.input.param("sdk_client_type", "java")
    self.replicate_to = self.input.param("replicate_to", 0)
    self.persist_to = self.input.param("persist_to", 0)
    self.sdk_retries = self.input.param("sdk_retries", 5)
    self.sdk_timeout = self.input.param("sdk_timeout", 5)
    self.time_unit = self.input.param("time_unit", "seconds")
    self.durability_level = self.input.param("durability", "").upper()
    self.sdk_client_pool = self.input.param("sdk_client_pool", None)
    self.sdk_pool_capacity = self.input.param("sdk_pool_capacity", 1)

    # Client compression settings
    self.sdk_compression = self.input.param("sdk_compression", None)
    compression_min_ratio = self.input.param("min_ratio", None)
    compression_min_size = self.input.param("min_size", None)
    if type(self.sdk_compression) is bool:
        self.sdk_compression = {"enabled": self.sdk_compression}
        if compression_min_size:
            self.sdk_compression["minSize"] = compression_min_size
        if compression_min_ratio:
            self.sdk_compression["minRatio"] = compression_min_ratio

    # Doc Loader Params
    self.process_concurrency = self.input.param("process_concurrency", 20)
    self.batch_size = self.input.param("batch_size", 2000)
    self.dgm_batch = self.input.param("dgm_batch", 5000)
    self.ryow = self.input.param("ryow", False)
    self.check_persistence = self.input.param("check_persistence", False)
    # End of client specific parameters

    # Initial number of items in the cluster
    self.services_init = self.input.param("services_init", None)
    self.nodes_init = self.input.param("nodes_init", 1)
    self.nodes_in = self.input.param("nodes_in", 1)
    self.nodes_out = self.input.param("nodes_out", 1)
    self.services_in = self.input.param("services_in", None)
    self.forceEject = self.input.param("forceEject", False)
    self.wait_timeout = self.input.param("wait_timeout", 120)
    self.verify_unacked_bytes = \
        self.input.param("verify_unacked_bytes", False)
    self.disabled_consistent_view = \
        self.input.param("disabled_consistent_view", None)
    self.rebalanceIndexWaitingDisabled = \
        self.input.param("rebalanceIndexWaitingDisabled", None)
    self.rebalanceIndexPausingDisabled = \
        self.input.param("rebalanceIndexPausingDisabled", None)
    self.maxParallelIndexers = \
        self.input.param("maxParallelIndexers", None)
    self.maxParallelReplicaIndexers = \
        self.input.param("maxParallelReplicaIndexers", None)
    self.quota_percent = self.input.param("quota_percent", 90)
    self.skip_buckets_handle = self.input.param("skip_buckets_handle",
                                                False)

    # SDKClientPool object for creating generic clients across tasks
    if self.sdk_client_pool is True:
        self.init_sdk_pool_object()

    # Initiate logging variables
    self.log = logger.get("test")
    self.infra_log = logger.get("infra")

    self.cleanup_pcaps()
    self.collect_pcaps = self.input.param("collect_pcaps", False)
    if self.collect_pcaps:
        self.start_collect_pcaps()

    # Variable for log collection using cbCollect
    self.get_cbcollect_info = self.input.param("get-cbcollect-info", False)

    # Variable for initializing the current (start of test) timestamp
    self.start_timestamp = datetime.now()

    '''
    Be careful while using this flag.
    This is only for stand-alone tests.
    During bug reproductions, when a crash is seen,
    stop_server_on_crash will stop the server so that we can
    collect data/logs/dumps at the right time
    '''
    self.stop_server_on_crash = self.input.param("stop_server_on_crash",
                                                 False)
    self.collect_data = self.input.param("collect_data", False)

    # Configure loggers
    self.log.setLevel(self.log_level)
    self.infra_log.setLevel(self.infra_log_level)

    # Support lib objects for testcase execution
    self.task_manager = TaskManager(self.thread_to_use)
    self.task = ServerTasks(self.task_manager)
    # End of library object creation

    self.sleep = sleep

    self.cleanup = False
    self.nonroot = False
    self.test_failure = None
    self.crash_warning = self.input.param("crash_warning", False)
    self.summary = TestSummary(self.log)

    # Populate memcached_port in case of cluster_run
    cluster_run_base_port = ClusterRun.port
    if int(self.input.servers[0].port) == ClusterRun.port:
        for server in self.input.servers:
            server.port = cluster_run_base_port
            cluster_run_base_port += 1
            # If not defined in node.ini under 'memcached_port' section
            if server.memcached_port is CbServer.memcached_port:
                server.memcached_port = \
                    ClusterRun.memcached_port \
                    + (2 * (int(server.port) - ClusterRun.port))

    self.log_setup_status(self.__class__.__name__, "started")
    cluster_name_format = "C%s"
    default_cluster_index = counter_index = 1
    if len(self.input.clusters) > 1:
        # Multi cluster setup
        for _, nodes in self.input.clusters.iteritems():
            cluster_name = cluster_name_format % counter_index
            tem_cluster = CBCluster(name=cluster_name, servers=nodes)
            self.cb_clusters[cluster_name] = tem_cluster
            counter_index += 1
    else:
        # Single cluster
        cluster_name = cluster_name_format % counter_index
        self.cb_clusters[cluster_name] = CBCluster(name=cluster_name,
                                                   servers=self.servers)

    # Initialize self.cluster with first available cluster as default
    self.cluster = self.cb_clusters[cluster_name_format
                                    % default_cluster_index]
    self.cluster_util = ClusterUtils(self.cluster, self.task_manager)
    self.bucket_util = BucketUtils(self.cluster_util, self.task)
    if self.standard_buckets > 10:
        self.bucket_util.change_max_buckets(self.cluster.master,
                                            self.standard_buckets)

    for cluster_name, cluster in self.cb_clusters.items():
        shell = RemoteMachineShellConnection(cluster.master)
        self.os_info = shell.extract_remote_info().type.lower()
        if self.os_info != 'windows':
            if cluster.master.ssh_username != "root":
                self.nonroot = True
                shell.disconnect()
                break
        shell.disconnect()

    """ Some tests need to bypass checking the cb server at setup
        to run installation """
    self.skip_init_check_cbserver = \
        self.input.param("skip_init_check_cbserver", False)

    try:
        if self.skip_setup_cleanup:
            self.cluster.buckets = self.bucket_util.get_all_buckets(
                self.cluster)
            return

        self.services_map = None

        self.log_setup_status("BaseTestCase", "started")
        for cluster_name, cluster in self.cb_clusters.items():
            if not self.skip_buckets_handle \
                    and not self.skip_init_check_cbserver:
                self.log.debug("Cleaning up cluster")
                cluster_util = ClusterUtils(cluster, self.task_manager)
                bucket_util = BucketUtils(cluster_util, self.task)
                cluster_util.cluster_cleanup(bucket_util)

        # Avoid cluster operations in setup for new upgrade / upgradeXDCR
        if str(self.__class__).find('newupgradetests') != -1 or \
                str(self.__class__).find('upgradeXDCR') != -1 or \
                str(self.__class__).find('Upgrade_EpTests') != -1 or \
                self.skip_buckets_handle:
            self.log.warning("Cluster operation in setup will be skipped")
            self.primary_index_created = True
            self.log_setup_status("BaseTestCase", "finished")
            return

        # Avoid clean up if the previous test has been torn down
        if self.case_number == 1 or self.case_number > 1000:
            if self.case_number > 1000:
                self.log.warn("TearDown for prev test failed. Will retry")
                self.case_number -= 1000
            self.cleanup = True
            if not self.skip_init_check_cbserver:
                self.tearDownEverything()
                self.tear_down_while_setup = False

        if not self.skip_init_check_cbserver:
            for cluster_name, cluster in self.cb_clusters.items():
                self.log.info("Initializing cluster")
                cluster_util = ClusterUtils(cluster, self.task_manager)
                cluster_util.reset_cluster()
                master_services = cluster_util.get_services(
                    cluster.servers[:1], self.services_init, start_node=0)
                if master_services is not None:
                    master_services = master_services[0].split(",")
                self.quota = self._initialize_nodes(
                    self.task, cluster,
                    self.disabled_consistent_view,
                    self.rebalanceIndexWaitingDisabled,
                    self.rebalanceIndexPausingDisabled,
                    self.maxParallelIndexers,
                    self.maxParallelReplicaIndexers,
                    self.port, self.quota_percent,
                    services=master_services)
                cluster_util.change_env_variables()
                cluster_util.change_checkpoint_params()
                self.log.info("{0} initialized".format(cluster))
        else:
            self.quota = ""

        # Enable dp_version since we need collections enabled
        if self.enable_dp:
            for server in self.cluster.servers:
                shell_conn = RemoteMachineShellConnection(server)
                cb_cli = CbCli(shell_conn)
                cb_cli.enable_dp()
                shell_conn.disconnect()

        for cluster_name, cluster in self.cb_clusters.items():
            cluster_util = ClusterUtils(cluster, self.task_manager)
            if self.log_info:
                cluster_util.change_log_info()
            if self.log_location:
                cluster_util.change_log_location()
            if self.stat_info:
                cluster_util.change_stat_info()
            if self.port_info:
                cluster_util.change_port_info()
            if self.port:
                self.port = str(self.port)

        self.log_setup_status("BaseTestCase", "finished")
        if not self.skip_init_check_cbserver:
            self.__log("started")
    except Exception as e:
        traceback.print_exc()
        self.task.shutdown(force=True)
        self.fail(e)
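# Worked example of the cluster_run memcached-port math above, assuming the
# usual cluster_run constants ClusterRun.port == 9000 and
# ClusterRun.memcached_port == 12000 (treat both values as assumptions
# taken from the constants module, not guarantees):
#
#   node index n -> server.port = 9000 + n
#                -> server.memcached_port = 12000 + 2 * n
#
# i.e. REST ports 9000/9001/9002 map to memcached ports 12000/12002/12004,
# matching cluster_run's two-ports-per-node layout.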
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.primary_index_created = False
    self.use_sdk_client = self.input.param("use_sdk_client", False)
    if self.input.param("log_level", None):
        log.setLevel(level=0)
        for hd in log.handlers:
            if str(hd.__class__).find('FileHandler') != -1:
                hd.setLevel(level=logging.DEBUG)
            else:
                hd.setLevel(level=getattr(
                    logging, self.input.param("log_level", None)))
    self.servers = self.input.servers
    self.buckets = []
    self.case_number = self.input.param("case_number", 0)
    self.thread_to_use = self.input.param("threads_to_use", 10)
    self.cluster = CBCluster(servers=self.input.servers)
    self.task_manager = TaskManager(self.thread_to_use)
    self.cluster_util = cluster_utils(self.cluster, self.task_manager)
    self.bucket_util = bucket_utils(self.cluster, self.task_manager,
                                    self.cluster_util)
    self.task = ServerTasks(self.task_manager)
    self.cleanup = False
    self.nonroot = False
    shell = RemoteMachineShellConnection(self.cluster.master)
    self.os_info = shell.extract_remote_info().type.lower()
    if self.os_info != 'windows':
        if self.cluster.master.ssh_username != "root":
            self.nonroot = True
    shell.disconnect()

    """ Some tests need to bypass checking the cb server at setup
        to run installation """
    self.skip_init_check_cbserver = self.input.param(
        "skip_init_check_cbserver", False)

    try:
        self.vbuckets = self.input.param("vbuckets", 1024)
        self.skip_setup_cleanup = self.input.param("skip_setup_cleanup",
                                                   False)
        self.index_quota_percent = self.input.param(
            "index_quota_percent", None)
        self.num_servers = self.input.param("servers",
                                            len(self.cluster.servers))
        # Initial number of items in the cluster
        self.services_init = self.input.param("services_init", None)
        self.nodes_init = self.input.param("nodes_init", 1)
        self.nodes_in = self.input.param("nodes_in", 1)
        self.nodes_out = self.input.param("nodes_out", 1)
        self.services_in = self.input.param("services_in", None)
        self.forceEject = self.input.param("forceEject", False)
        self.num_items = self.input.param("num_items", 100000)
        self.num_replicas = self.input.param("replicas", 1)
        self.value_size = self.input.param("value_size", 1)
        self.wait_timeout = self.input.param("wait_timeout", 60)
        self.dgm_run = self.input.param("dgm_run", False)
        self.active_resident_threshold = int(
            self.input.param("active_resident_threshold", 0))
        self.verify_unacked_bytes = self.input.param(
            "verify_unacked_bytes", False)
        self.force_kill_memcached = TestInputSingleton.input.param(
            'force_kill_memcached', False)
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.rebalanceIndexWaitingDisabled = self.input.param(
            "rebalanceIndexWaitingDisabled", None)
        self.rebalanceIndexPausingDisabled = self.input.param(
            "rebalanceIndexPausingDisabled", None)
        self.maxParallelIndexers = self.input.param(
            "maxParallelIndexers", None)
        self.maxParallelReplicaIndexers = self.input.param(
            "maxParallelReplicaIndexers", None)
        self.quota_percent = self.input.param("quota_percent", None)
        self.port = None
        self.log_info = self.input.param("log_info", None)
        self.log_location = self.input.param("log_location", None)
        self.stat_info = self.input.param("stat_info", None)
        self.port_info = self.input.param("port_info", None)
        if not hasattr(self, 'skip_buckets_handle'):
            self.skip_buckets_handle = self.input.param(
                "skip_buckets_handle", False)
        # Kill hung tests and jump to the next one
        self.test_timeout = self.input.param("test_timeout", 3600)
        self.gsi_type = self.input.param("gsi_type", 'plasma')
        self.compression_mode = self.input.param("compression_mode",
                                                 'passive')
        self.sdk_compression = self.input.param("sdk_compression", True)
        self.replicate_to = self.input.param("replicate_to", 0)
        self.persist_to = self.input.param("persist_to", 0)
        # jre-path for cbas
        self.jre_path = self.input.param("jre_path", None)
        # End of bucket parameters spot (this is ongoing)

        if self.skip_setup_cleanup:
            self.buckets = self.bucket_util.get_all_buckets()
            return
        if not self.skip_init_check_cbserver:
            self.cb_version = None
            if RestHelper(RestConnection(
                    self.cluster.master)).is_ns_server_running():
                """
                Every new couchbase version brings new features, so test
                code may not work on a previous release. We need the
                couchbase version to filter out those tests.
                """
                self.cb_version = RestConnection(
                    self.cluster.master).get_nodes_version()
            else:
                log.info("couchbase server does not run yet")
            self.protocol = self.cluster_util.get_protocol_type()
        self.services_map = None

        log.info("============== basetestcase setup was started for "
                 "test #{0} {1} =============="
                 .format(self.case_number, self._testMethodName))
        if not self.skip_buckets_handle \
                and not self.skip_init_check_cbserver:
            self.cluster_util.cluster_cleanup(self.bucket_util)

        # Avoid any cluster operations in setup for new upgrade
        # & upgradeXDCR tests
        if str(self.__class__).find('newupgradetests') != -1 or \
                str(self.__class__).find('upgradeXDCR') != -1 or \
                str(self.__class__).find('Upgrade_EpTests') != -1 or \
                hasattr(self, 'skip_buckets_handle') and \
                self.skip_buckets_handle:
            log.info("Any cluster operation in setup will be skipped")
            self.primary_index_created = True
            log.info("============== basetestcase setup was finished for "
                     "test #{0} {1} =============="
                     .format(self.case_number, self._testMethodName))
            return

        # Avoid clean up if the previous test has been torn down
        if self.case_number == 1 or self.case_number > 1000:
            if self.case_number > 1000:
                log.warn("TearDown for previous test failed. Will retry..")
                self.case_number -= 1000
            self.cleanup = True
            if not self.skip_init_check_cbserver:
                self.tearDownEverything()
                self.task = ServerTasks(self.task_manager)

        if not self.skip_init_check_cbserver:
            log.info("Initializing cluster")
            # self.cluster_util.reset_cluster()
            master_services = self.cluster_util.get_services(
                self.servers[:1], self.services_init, start_node=0)
            if master_services is not None:
                master_services = master_services[0].split(",")
            self.quota = self._initialize_nodes(
                self.task, self.cluster.servers,
                self.disabled_consistent_view,
                self.rebalanceIndexWaitingDisabled,
                self.rebalanceIndexPausingDisabled,
                self.maxParallelIndexers,
                self.maxParallelReplicaIndexers,
                self.port, self.quota_percent,
                services=master_services)
            self.cluster_util.change_env_variables()
            self.cluster_util.change_checkpoint_params()
            log.info("Done initializing cluster")
        else:
            self.quota = ""

        if self.input.param("log_info", None):
            self.cluster_util.change_log_info()
        if self.input.param("log_location", None):
            self.cluster_util.change_log_location()
        if self.input.param("stat_info", None):
            self.cluster_util.change_stat_info()
        if self.input.param("port_info", None):
            self.cluster_util.change_port_info()
        if self.input.param("port", None):
            self.port = str(self.input.param("port", None))

        log.info("============== basetestcase setup was finished for "
                 "test #{0} {1} =============="
                 .format(self.case_number, self._testMethodName))
        if not self.skip_init_check_cbserver:
            self._log_start()
            self.sleep(5)
    except Exception as e:
        traceback.print_exc()
        self.task.shutdown(force=True)
        self.fail(e)
def setUp(self):
    """
    Since BaseTestCase will initialize at least one cluster, we pass
    services for the master node of that cluster.
    """
    if not hasattr(self, "input"):
        self.input = TestInputSingleton.input

    """
    Cluster node services. Parameter value format
    serv1:serv2-serv1:serv2|serv1:serv2-serv1:serv2
    | -> separates services per cluster.
    - -> separates services on each node of the cluster.
    : -> separates services on a node.
    """
    temp_service_init = [x.split("-") for x in self.input.param(
        "services_init", "kv:n1ql:index").split("|")]
    self.input.test_params.update(
        {"services_init": temp_service_init[0][0]})

    """
    Number of nodes per cluster. Parameter value format
    num_nodes_cluster1|num_nodes_cluster2|...
    | -> separates number of nodes per cluster.
    """
    if not isinstance(self.input.param("nodes_init", 1), int):
        temp_nodes_init = [int(x) for x in self.input.param(
            "nodes_init", 1).split("|")]
    else:
        temp_nodes_init = [self.input.param("nodes_init", 1)]

    super(CBASBaseTest, self).setUp()

    if self._testMethodDoc:
        self.log.info("Starting Test: %s - %s"
                      % (self._testMethodName, self._testMethodDoc))
    else:
        self.log.info("Starting Test: %s" % self._testMethodName)

    self.services_init = temp_service_init
    self.nodes_init = temp_nodes_init

    """
    Parameterized support for multiple clusters instead of creating
    multiple clusters from the ini file.
    """
    self.num_of_clusters = self.input.param('num_of_clusters', 1)

    """
    Since BaseTestCase will initialize at least one cluster, we need to
    modify the initialized cluster's server property to correctly
    reflect the servers in that cluster.
    """
    start = 0
    end = self.nodes_init[0]
    cluster = self.cb_clusters[self.cb_clusters.keys()[0]]
    cluster.servers = self.servers[start:end]
    cluster.nodes_in_cluster.append(cluster.master)
    cluster.kv_nodes.append(cluster.master)
    if "cbas" in cluster.master.services:
        cluster.cbas_nodes.append(cluster.master)

    """
    Since BaseTestCase will initialize at least one cluster, we need to
    initialize only (total clusters required - 1).
    """
    cluster_name_format = "C%s"
    for i in range(1, self.num_of_clusters):
        start = end
        end += self.nodes_init[i]
        cluster_name = cluster_name_format % str(i + 1)
        cluster = CBCluster(name=cluster_name,
                            servers=self.servers[start:end])
        self.cb_clusters[cluster_name] = cluster
        cluster.nodes_in_cluster.append(cluster.master)
        cluster.kv_nodes.append(cluster.master)
        if "cbas" in cluster.master.services:
            cluster.cbas_nodes.append(cluster.master)
        self.initialize_cluster(cluster_name, cluster,
                                services=self.services_init[i][0])
        self.modify_cluster_settings(cluster)

    self.available_servers = self.servers[end:]

    """
    KV infra to be created per cluster. Accepted values are -
    bkt_spec : will create KV infra based on bucket spec.
               bucket_spec param needs to be passed.
    default  : will create a bucket named default on the cluster.
    None     : no buckets will be created on the cluster.
    | -> separates the value per cluster.
    """
    if self.input.param("cluster_kv_infra", None):
        self.cluster_kv_infra = self.input.param("cluster_kv_infra",
                                                 None).split("|")
        if len(self.cluster_kv_infra) < self.num_of_clusters:
            self.cluster_kv_infra.extend(
                [None] * (self.num_of_clusters
                          - len(self.cluster_kv_infra)))
    else:
        self.cluster_kv_infra = [None] * self.num_of_clusters

    # Common properties
    self.num_concurrent_queries = self.input.param('num_queries', 5000)
    self.concurrent_batch_size = self.input.param('concurrent_batch_size',
                                                  100)
    self.index_fields = self.input.param('index_fields', None)
    if self.index_fields:
        self.index_fields = self.index_fields.split("-")
    self.retry_time = self.input.param("retry_time", 300)
    self.num_retries = self.input.param("num_retries", 1)
    self.cbas_spec_name = self.input.param("cbas_spec", None)
    self.expected_error = self.input.param("error", None)
    self.bucket_spec = self.input.param("bucket_spec", None)
    self.doc_spec_name = self.input.param("doc_spec_name", "initial_load")
    self.set_default_cbas_memory = self.input.param(
        'set_default_cbas_memory', False)
    self.cbas_memory_quota_percent = int(self.input.param(
        "cbas_memory_quota_percent", 100))
    self.bucket_size = self.input.param("bucket_size", 250)

    self.cbas_util = CbasUtil(self.task)

    # Add nodes to the cluster as per nodes_init param
    for i, (cluster_name, cluster) in enumerate(self.cb_clusters.items()):
        cluster.rest = RestConnection(cluster.master)

        for j, server in enumerate(cluster.servers):
            if server.ip != cluster.master.ip:
                server.services = self.services_init[i][j].replace(":", ",")
                if "cbas" in server.services:
                    cluster.cbas_nodes.append(server)
                if "kv" in server.services:
                    cluster.kv_nodes.append(server)
                rest = RestConnection(server)
                rest.set_data_path(
                    data_path=server.data_path,
                    index_path=server.index_path,
                    cbas_path=server.cbas_path)

        if self.set_default_cbas_memory:
            self.log.info(
                "Setting the min possible memory quota so that adding "
                "more nodes to the cluster wouldn't be a problem.")
            cluster.rest.set_service_mem_quota({
                CbServer.Settings.KV_MEM_QUOTA: MIN_KV_QUOTA,
                CbServer.Settings.FTS_MEM_QUOTA: FTS_QUOTA,
                CbServer.Settings.INDEX_MEM_QUOTA: INDEX_QUOTA})
            self.log.info("Setting %d memory quota for CBAS" % CBAS_QUOTA)
            cluster.cbas_memory_quota = CBAS_QUOTA
            cluster.rest.set_service_mem_quota({
                CbServer.Settings.CBAS_MEM_QUOTA: CBAS_QUOTA})
        else:
            self.set_memory_for_services(cluster, server, server.services)

        if cluster.servers[1:]:
            self.task.rebalance(
                [cluster.master], cluster.servers[1:], [],
                services=[server.services
                          for server in cluster.servers[1:]])
            cluster.nodes_in_cluster.extend(cluster.servers[1:])

        if cluster.cbas_nodes:
            cbas_cc_node_ip = None
            retry = 0
            while retry < 60:
                cbas_cc_node_ip = \
                    self.cbas_util.retrieve_cc_ip_from_master(cluster)
                if cbas_cc_node_ip:
                    break
                else:
                    self.sleep(10, "Waiting for CBAS service to come up")
                    retry += 1
            for server in cluster.cbas_nodes:
                if server.ip == cbas_cc_node_ip:
                    cluster.cbas_cc_node = server
                    break

        if "cbas" in cluster.master.services:
            self.cbas_util.cleanup_cbas(cluster)
        cluster.otpNodes = cluster.rest.node_statuses()

        if self.cluster_kv_infra[i] == "bkt_spec":
            if self.bucket_spec is not None:
                try:
                    self.collectionSetUp(cluster)
                except Java_base_exception as exception:
                    self.handle_setup_exception(exception)
                except Exception as exception:
                    self.handle_setup_exception(exception)
            else:
                self.fail("Error : bucket_spec param needed")
        elif self.cluster_kv_infra[i] == "default":
            self.bucket_util.create_default_bucket(
                cluster,
                bucket_type=self.bucket_type,
                ram_quota=self.bucket_size,
                replica=self.num_replicas,
                conflict_resolution=self.bucket_conflict_resolution_type,
                replica_index=self.bucket_replica_index,
                storage=self.bucket_storage,
                eviction_policy=self.bucket_eviction_policy,
                flush_enabled=self.flush_enabled)
        self.bucket_util.add_rbac_user(cluster.master)

        # Wait for analytics service to be up
        if hasattr(cluster, "cbas_util"):
            if not self.cbas_util.is_analytics_running(cluster):
                self.fail("Analytics service did not come up even after "
                          "10 mins of wait after initialisation")

    self.log.info("=== CBAS_BASE setup was finished for test #{0} {1} ==="
                  .format(self.case_number, self._testMethodName))
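# Worked example of the services_init / nodes_init formats documented in
# the docstrings above (parameter values are illustrative):
#
#   services_init = "kv:n1ql-index|kv-kv:cbas"
#   -> [["kv:n1ql", "index"], ["kv", "kv:cbas"]]
#      cluster 1: node0 runs kv+n1ql, node1 runs index
#      cluster 2: node0 runs kv,      node1 runs kv+cbas
#
#   nodes_init = "2|2" -> [2, 2]   (two nodes in each cluster)
#
# Note the per-node strings are later rewritten with ":" -> "," because
# the rebalance/REST layer expects comma-separated service lists.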
def setUp(self):
    self.failover_util = failover_utils()
    self.node_util = node_utils()
    self.views_util = views_utils()

    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.primary_index_created = False
    self.use_sdk_client = self.input.param("use_sdk_client", False)
    if self.input.param("log_level", None):
        log.setLevel(level=0)
        for hd in log.handlers:
            if str(hd.__class__).find('FileHandler') != -1:
                hd.setLevel(level=logging.DEBUG)
            else:
                hd.setLevel(level=getattr(
                    logging, self.input.param("log_level", None)))
    self.servers = self.input.servers
    if str(self.__class__).find('moxitests') != -1:
        self.moxi_server = self.input.moxis[0]
        self.servers = [server for server in self.servers
                        if server.ip != self.moxi_server.ip]
    self.buckets = []
    self.cluster = CBCluster(servers=self.input.servers)
    self.bucket_util = bucket_utils(self.cluster)
    self.cluster_util = cluster_utils(self.cluster)
    self.task = ServerTasks()
    self.pre_warmup_stats = {}
    self.cleanup = False
    self.nonroot = False
    shell = RemoteMachineShellConnection(self.cluster.master)
    self.os_info = shell.extract_remote_info().type.lower()
    if self.os_info != 'windows':
        if self.cluster.master.ssh_username != "root":
            self.nonroot = True
    shell.disconnect()

    """ Some tests need to bypass checking the cb server at setup
        to run installation """
    self.skip_init_check_cbserver = \
        self.input.param("skip_init_check_cbserver", False)
    self.data_collector = DataCollector()
    self.data_analyzer = DataAnalyzer()
    self.result_analyzer = DataAnalysisResultAnalyzer()

    try:
        self.skip_setup_cleanup = self.input.param("skip_setup_cleanup",
                                                   False)
        self.index_quota_percent = self.input.param("index_quota_percent",
                                                    None)
        self.targetIndexManager = self.input.param("targetIndexManager",
                                                   False)
        self.targetMaster = self.input.param("targetMaster", False)
        self.reset_services = self.input.param("reset_services", False)
        self.auth_mech = self.input.param("auth_mech", "PLAIN")
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # Number of the case performed from testrunner
        # (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        self.parallelism = self.input.param("parallelism", False)
        self.verify_unacked_bytes = self.input.param(
            "verify_unacked_bytes", False)
        self.enable_flow_control = self.input.param("enable_flow_control",
                                                    False)
        self.num_servers = self.input.param("servers",
                                            len(self.cluster.servers))
        # Initial number of items in the cluster
        self.nodes_init = self.input.param("nodes_init", 1)
        self.nodes_in = self.input.param("nodes_in", 1)
        self.nodes_out = self.input.param("nodes_out", 1)
        self.services_init = self.input.param("services_init", None)
        self.services_in = self.input.param("services_in", None)
        self.forceEject = self.input.param("forceEject", False)
        self.force_kill_memcached = TestInputSingleton.input.param(
            'force_kill_memcached', False)
        self.num_items = self.input.param("items", 1000)
        self.value_size = self.input.param("value_size", 512)
        self.dgm_run = self.input.param("dgm_run", False)
        self.active_resident_threshold = int(
            self.input.param("active_resident_threshold", 0))
        # Max number of items to verify in ValidateDataTask;
        # None means verify all
        self.max_verify = self.input.param("max_verify", None)
        # We don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.rebalanceIndexWaitingDisabled = self.input.param(
            "rebalanceIndexWaitingDisabled", None)
        self.rebalanceIndexPausingDisabled = self.input.param(
            "rebalanceIndexPausingDisabled", None)
        self.maxParallelIndexers = self.input.param(
            "maxParallelIndexers", None)
        self.maxParallelReplicaIndexers = self.input.param(
            "maxParallelReplicaIndexers", None)
        self.quota_percent = self.input.param("quota_percent", None)
        self.port = None
        self.log_message = self.input.param("log_message", None)
        self.log_info = self.input.param("log_info", None)
        self.log_location = self.input.param("log_location", None)
        self.stat_info = self.input.param("stat_info", None)
        self.port_info = self.input.param("port_info", None)
        if not hasattr(self, 'skip_buckets_handle'):
            self.skip_buckets_handle = self.input.param(
                "skip_buckets_handle", False)
        self.nodes_out_dist = self.input.param("nodes_out_dist", None)
        self.absolute_path = self.input.param("absolute_path", True)
        # Kill hung tests and jump to the next one
        self.test_timeout = self.input.param("test_timeout", 3600)
        self.enable_bloom_filter = self.input.param("enable_bloom_filter",
                                                    False)
        # self.enable_time_sync = self.input.param("enable_time_sync",
        #                                          False)
        self.gsi_type = self.input.param("gsi_type", 'plasma')
        # jre-path for cbas
        self.jre_path = self.input.param("jre_path", None)
        # End of bucket parameters spot (this is ongoing)

        if self.skip_setup_cleanup:
            self.buckets = BucketHelper(self.master).get_buckets()
            return
        if not self.skip_init_check_cbserver:
            self.cb_version = None
            if RestHelper(RestConnection(
                    self.cluster.master)).is_ns_server_running():
                """
                Every new couchbase version brings new features, so test
                code may not work on a previous release. We need the
                couchbase version to filter out those tests.
                """
                self.cb_version = RestConnection(
                    self.cluster.master).get_nodes_version()
            else:
                log.info("couchbase server does not run yet")
            self.protocol = self.cluster_util.get_protocol_type()
        self.services_map = None

        log.info("============== basetestcase setup was started for "
                 "test #{0} {1} =============="
                 .format(self.case_number, self._testMethodName))
        if not self.skip_buckets_handle \
                and not self.skip_init_check_cbserver:
            self.cluster_util._cluster_cleanup(self.bucket_util)

        # Avoid any cluster operations in setup for new upgrade
        # & upgradeXDCR tests
        if str(self.__class__).find('newupgradetests') != -1 or \
                str(self.__class__).find('upgradeXDCR') != -1 or \
                str(self.__class__).find('Upgrade_EpTests') != -1 or \
                hasattr(self, 'skip_buckets_handle') and \
                self.skip_buckets_handle:
            log.info("Any cluster operation in setup will be skipped")
            self.primary_index_created = True
            log.info("============== basetestcase setup was finished for "
                     "test #{0} {1} =============="
                     .format(self.case_number, self._testMethodName))
            return

        # Avoid clean up if the previous test has been torn down
        if self.case_number == 1 or self.case_number > 1000:
            if self.case_number > 1000:
                log.warn("TearDown for previous test failed. Will retry..")
                self.case_number -= 1000
            self.cleanup = True
            if not self.skip_init_check_cbserver:
                self.tearDownEverything()
                self.task = ServerTasks()

        if not self.skip_init_check_cbserver:
            log.info("Initializing cluster")
            self.cluster_util.reset_cluster(self.targetMaster,
                                            self.reset_services)
            master_services = self.cluster_util.get_services(
                self.servers[:1], self.services_init, start_node=0)
            if master_services is not None:
                master_services = master_services[0].split(",")
            self.quota = self._initialize_nodes(
                self.task, self.cluster.servers,
                self.disabled_consistent_view,
                self.rebalanceIndexWaitingDisabled,
                self.rebalanceIndexPausingDisabled,
                self.maxParallelIndexers,
                self.maxParallelReplicaIndexers,
                self.port, self.quota_percent,
                services=master_services)
            self.cluster_util.change_env_variables()
            self.cluster_util.change_checkpoint_params()
            # Add built-in user
            if not self.skip_init_check_cbserver:
                self.add_built_in_server_user(node=self.cluster.master)
            log.info("Done initializing cluster")
        else:
            self.quota = ""

        if self.input.param("log_info", None):
            self.cluster_util.change_log_info()
        if self.input.param("log_location", None):
            self.cluster_util.change_log_location()
        if self.input.param("stat_info", None):
            self.cluster_util.change_stat_info()
        if self.input.param("port_info", None):
            self.cluster_util.change_port_info()
        if self.input.param("port", None):
            self.port = str(self.input.param("port", None))

        try:
            if str(self.__class__).find(
                    'rebalanceout.RebalanceOutTests') != -1 or \
                    str(self.__class__).find(
                        'memorysanitytests.MemorySanity') != -1 or \
                    str(self.__class__).find(
                        'negativetests.NegativeTests') != -1 or \
                    str(self.__class__).find(
                        'warmuptest.WarmUpTests') != -1 or \
                    str(self.__class__).find(
                        'failover.failovertests.FailoverTests') != -1 or \
                    str(self.__class__).find(
                        'observe.observeseqnotests'
                        '.ObserveSeqNoTests') != -1 or \
                    str(self.__class__).find(
                        'epengine.lwwepengine.LWW_EP_Engine') != -1:
                self.services = self.get_services(self.servers,
                                                  self.services_init)
                # Rebalance all nodes into the cluster before each test
                self.task.rebalance(self.servers[:self.num_servers],
                                    self.servers[1:self.num_servers],
                                    [], services=self.services)
            elif self.nodes_init > 1 and not self.skip_init_check_cbserver:
                self.services = self.get_services(
                    self.servers[:self.nodes_init], self.services_init)
                self.task.rebalance(self.servers[:1],
                                    self.servers[1:self.nodes_init],
                                    [], services=self.services)
            elif str(self.__class__).find('ViewQueryTests') != -1 and \
                    not self.input.param("skip_rebalance", False):
                self.services = self.get_services(self.servers,
                                                  self.services_init)
                self.task.rebalance(self.servers, self.servers[1:], [],
                                    services=self.services)
                self.cluster_util.setDebugLevel(service_type="index")
        except BaseException as e:
            # Increase case_number to retry tearDown in setup
            # for the next test
            self.case_number += 1000
            self.fail(e)

        log.info("============== basetestcase setup was finished for "
                 "test #{0} {1} =============="
                 .format(self.case_number, self._testMethodName))
        if not self.skip_init_check_cbserver:
            self._log_start(self)
            self.sleep(10)
    except Exception as e:
        traceback.print_exc()
        self.fail(e)
def setUp(self):
    super(OnPremBaseTest, self).setUp()

    # Framework specific parameters (extension from cb_basetest)
    self.skip_cluster_reset = self.input.param("skip_cluster_reset", False)
    self.skip_setup_cleanup = self.input.param("skip_setup_cleanup", False)
    # End of framework parameters

    # Cluster level info settings
    self.log_info = self.input.param("log_info", None)
    self.log_location = self.input.param("log_location", None)
    self.stat_info = self.input.param("stat_info", None)
    self.port = self.input.param("port", None)
    self.port_info = self.input.param("port_info", None)
    self.servers = self.input.servers
    self.num_servers = self.input.param("servers", len(self.servers))
    self.vbuckets = self.input.param("vbuckets", CbServer.total_vbuckets)
    self.gsi_type = self.input.param("gsi_type", 'plasma')

    # Memory quota settings
    # Max memory quota to utilize per node
    self.quota_percent = self.input.param("quota_percent", 100)
    # Services' RAM quota to set on cluster
    self.kv_mem_quota_percent = self.input.param("kv_quota_percent", None)
    self.index_mem_quota_percent = \
        self.input.param("index_quota_percent", None)
    self.fts_mem_quota_percent = \
        self.input.param("fts_quota_percent", None)
    self.cbas_mem_quota_percent = \
        self.input.param("cbas_quota_percent", None)
    self.eventing_mem_quota_percent = \
        self.input.param("eventing_quota_percent", None)

    # CBAS setting
    self.jre_path = self.input.param("jre_path", None)
    self.enable_dp = self.input.param("enable_dp", False)
    # End of cluster info parameters

    # Bucket specific params
    # Note: Overriding bucket_eviction_policy from CouchbaseBaseTest
    self.bucket_eviction_policy = \
        self.input.param("bucket_eviction_policy",
                         Bucket.EvictionPolicy.VALUE_ONLY)
    self.bucket_replica_index = self.input.param("bucket_replica_index", 1)
    if self.bucket_storage == Bucket.StorageBackend.magma:
        self.bucket_eviction_policy = Bucket.EvictionPolicy.FULL_EVICTION
    # End of bucket parameters

    self.services_in = self.input.param("services_in", None)
    self.forceEject = self.input.param("forceEject", False)
    self.wait_timeout = self.input.param("wait_timeout", 120)
    self.verify_unacked_bytes = \
        self.input.param("verify_unacked_bytes", False)
    self.disabled_consistent_view = \
        self.input.param("disabled_consistent_view", None)
    self.rebalanceIndexWaitingDisabled = \
        self.input.param("rebalanceIndexWaitingDisabled", None)
    self.rebalanceIndexPausingDisabled = \
        self.input.param("rebalanceIndexPausingDisabled", None)
    self.maxParallelIndexers = \
        self.input.param("maxParallelIndexers", None)
    self.maxParallelReplicaIndexers = \
        self.input.param("maxParallelReplicaIndexers", None)
    self.use_https = self.input.param("use_https", False)
    self.enforce_tls = self.input.param("enforce_tls", False)
    self.ipv4_only = self.input.param("ipv4_only", False)
    self.ipv6_only = self.input.param("ipv6_only", False)
    self.multiple_ca = self.input.param("multiple_ca", False)
    if self.use_https:
        CbServer.use_https = True
        trust_all_certs()

    self.node_utils.cleanup_pcaps(self.servers)
    self.collect_pcaps = self.input.param("collect_pcaps", False)
    if self.collect_pcaps:
        self.node_utils.start_collect_pcaps(self.servers)

    '''
    Be careful while using this flag.
    This is only for stand-alone tests.
    During bug reproductions, when a crash is seen,
    stop_server_on_crash will stop the server so that we can
    collect data/logs/dumps at the right time
    '''
    self.stop_server_on_crash = self.input.param("stop_server_on_crash",
                                                 False)
    self.collect_data = self.input.param("collect_data", False)
    self.validate_system_event_logs = \
        self.input.param("validate_sys_event_logs", False)

    self.nonroot = False
    self.crash_warning = self.input.param("crash_warning", False)

    # Populate memcached_port in case of cluster_run
    cluster_run_base_port = ClusterRun.port
    if int(self.input.servers[0].port) == ClusterRun.port:
        for server in self.input.servers:
            server.port = cluster_run_base_port
            cluster_run_base_port += 1
            # If not defined in node.ini under 'memcached_port' section
            if server.memcached_port is CbServer.memcached_port:
                server.memcached_port = \
                    ClusterRun.memcached_port \
                    + (2 * (int(server.port) - ClusterRun.port))

    self.log_setup_status(self.__class__.__name__, "started")
    cluster_name_format = "C%s"
    default_cluster_index = counter_index = 1
    if len(self.input.clusters) > 1:
        # Multi cluster setup
        for _, nodes in self.input.clusters.iteritems():
            cluster_name = cluster_name_format % counter_index
            tem_cluster = CBCluster(name=cluster_name, servers=nodes,
                                    vbuckets=self.vbuckets)
            self.cb_clusters[cluster_name] = tem_cluster
            counter_index += 1
    else:
        # Single cluster
        cluster_name = cluster_name_format % counter_index
        self.cb_clusters[cluster_name] = CBCluster(name=cluster_name,
                                                   servers=self.servers,
                                                   vbuckets=self.vbuckets)

    # Initialize self.cluster with first available cluster as default
    self.cluster = self.cb_clusters[cluster_name_format
                                    % default_cluster_index]
    self.cluster_util = ClusterUtils(self.task_manager)
    self.bucket_util = BucketUtils(self.cluster_util, self.task)

    CbServer.enterprise_edition = \
        self.cluster_util.is_enterprise_edition(self.cluster)
    if CbServer.enterprise_edition:
        self.cluster.edition = "enterprise"
    else:
        self.cluster.edition = "community"

    if self.standard_buckets > 10:
        self.bucket_util.change_max_buckets(self.cluster.master,
                                            self.standard_buckets)

    for cluster_name, cluster in self.cb_clusters.items():
        # Append initial master node to the nodes_in_cluster list
        cluster.nodes_in_cluster.append(cluster.master)

        shell = RemoteMachineShellConnection(cluster.master)
        self.os_info = shell.extract_remote_info().type.lower()
        if self.os_info != 'windows':
            if cluster.master.ssh_username != "root":
                self.nonroot = True
                shell.disconnect()
                break
        shell.disconnect()

    self.log_setup_status("OnPremBaseTest", "started")
    try:
        # Construct dict of mem. quota percent / mb per service
        mem_quota_percent = dict()
        if self.kv_mem_quota_percent:
            mem_quota_percent[CbServer.Services.KV] = \
                self.kv_mem_quota_percent
        if self.index_mem_quota_percent:
            mem_quota_percent[CbServer.Services.INDEX] = \
                self.index_mem_quota_percent
        if self.cbas_mem_quota_percent:
            mem_quota_percent[CbServer.Services.CBAS] = \
                self.cbas_mem_quota_percent
        if self.fts_mem_quota_percent:
            mem_quota_percent[CbServer.Services.FTS] = \
                self.fts_mem_quota_percent
        if self.eventing_mem_quota_percent:
            mem_quota_percent[CbServer.Services.EVENTING] = \
                self.eventing_mem_quota_percent
        if not mem_quota_percent:
            mem_quota_percent = None

        if self.skip_setup_cleanup:
            # Update current server/service map and buckets for the cluster
            for _, cluster in self.cb_clusters.items():
                self.cluster_util.update_cluster_nodes_service_list(cluster)
                cluster.buckets = self.bucket_util.get_all_buckets(cluster)
            return
        else:
            for cluster_name, cluster in self.cb_clusters.items():
                self.log.info("Delete all buckets and rebalance out "
                              "other nodes from '%s'" % cluster_name)
                self.cluster_util.cluster_cleanup(cluster, self.bucket_util)

        reload(Cb_constants)

        # Avoid clean up if the previous test has been torn down
        if self.case_number == 1 or self.case_number > 1000:
            if self.case_number > 1000:
                self.log.warn("TearDown for prev test failed. Will retry")
                self.case_number -= 1000
            self.tearDownEverything(reset_cluster_env_vars=False)

        for cluster_name, cluster in self.cb_clusters.items():
            if not self.skip_cluster_reset:
                self.initialize_cluster(
                    cluster_name, cluster, services=None,
                    services_mem_quota_percent=mem_quota_percent)

            # Update initial service map for the master node
            self.cluster_util.update_cluster_nodes_service_list(cluster)

            # Set this unconditionally
            RestConnection(cluster.master).set_internalSetting(
                "magmaMinMemoryQuota", 256)

        # Enable dp_version since we need collections enabled
        if self.enable_dp:
            tasks = []
            for server in self.cluster.servers:
                task = self.node_utils.async_enable_dp(server)
                tasks.append(task)
            for task in tasks:
                self.task_manager.get_task_result(task)

        # Enforce tls on nodes of all clusters
        if self.use_https and self.enforce_tls:
            for _, cluster in self.cb_clusters.items():
                tasks = []
                for node in cluster.servers:
                    task = self.node_utils.async_enable_tls(node)
                    tasks.append(task)
                for task in tasks:
                    self.task_manager.get_task_result(task)
                self.log.info("Validating if services obey tls only "
                              "on servers {0}".format(cluster.servers))
                status = self.cluster_util.check_if_services_obey_tls(
                    cluster.servers)
                if not status:
                    self.fail("Services did not honor enforce tls")

        # Enforce IPv4 or IPv6 or both
        if self.ipv4_only or self.ipv6_only:
            for _, cluster in self.cb_clusters.items():
                status, msg = \
                    self.cluster_util.enable_disable_ip_address_family_type(
                        cluster, True, self.ipv4_only, self.ipv6_only)
                if not status:
                    self.fail(msg)

        self.standard = self.input.param("standard", "pkcs8")
        self.passphrase_type = self.input.param("passphrase_type", "script")
        self.encryption_type = self.input.param("encryption_type", "aes256")
        if self.multiple_ca:
            for _, cluster in self.cb_clusters.items():
                cluster.x509 = x509main(
                    host=cluster.master,
                    standard=self.standard,
                    encryption_type=self.encryption_type,
                    passphrase_type=self.passphrase_type)
                self.generate_and_upload_cert(
                    cluster.servers, cluster.x509,
                    upload_root_certs=True,
                    upload_node_certs=True,
                    upload_client_certs=True)
                payload = "name=cbadminbucket&roles=admin&password=password"
                rest = RestConnection(cluster.master)
                rest.add_set_builtin_user("cbadminbucket", payload)

        for cluster_name, cluster in self.cb_clusters.items():
            self.modify_cluster_settings(cluster)

        self.__log("started")
    except Exception as e:
        traceback.print_exc()
        self.task.shutdown(force=True)
        self.fail(e)
    finally:
        # Track test start time only if we need system log validation
        if self.validate_system_event_logs:
            self.system_events.set_test_start_time()
        self.log_setup_status("OnPremBaseTest", "finished")
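# Example of the per-service quota dict built at the top of the try block
# (parameter values are illustrative): running with
#
#   -p kv_quota_percent=50,index_quota_percent=30
#
# yields
#
#   mem_quota_percent == {CbServer.Services.KV: 50,
#                         CbServer.Services.INDEX: 30}
#
# which initialize_cluster() then applies against the per-node quota cap
# (quota_percent, default 100). With no *_quota_percent params the dict
# stays empty and is normalized to None so server defaults apply.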
class CBASBaseTest(OnPremBaseTest):
    def setUp(self):
        """
        Since BaseTestCase will initialize at least one cluster, we pass
        the services for the master node of that cluster.
        """
        if not hasattr(self, "input"):
            self.input = TestInputSingleton.input

        """
        In case of a multi-cluster setup, this parameter is required if
        the cluster address family needs to be set.
        """
        if self.input.param("cluster_ip_family", ""):
            cluster_ip_family = self.input.param(
                "cluster_ip_family", "").split("|")
            if cluster_ip_family[0] == "ipv4_only":
                self.input.test_params.update({"ipv4_only": True,
                                               "ipv6_only": False})
            elif cluster_ip_family[0] == "ipv6_only":
                self.input.test_params.update({"ipv4_only": False,
                                               "ipv6_only": True})
            elif cluster_ip_family[0] == "ipv4_ipv6":
                self.input.test_params.update({"ipv4_only": True,
                                               "ipv6_only": True})
            else:
                self.input.test_params.update({"ipv4_only": False,
                                               "ipv6_only": False})

        super(CBASBaseTest, self).setUp()

        """
        Cluster node services. Parameter value format:
        serv1:serv2-serv1:serv2|serv1:serv2-serv1:serv2
        | -> separates services per cluster.
        - -> separates services on each node of a cluster.
        : -> separates services on a node.
        """
        self.services_init = [
            x.split("-") for x in
            self.input.param("services_init", "kv:n1ql:index").split("|")]

        """
        Number of nodes per cluster. Parameter value format:
        num_nodes_cluster1|num_nodes_cluster2|...
        | -> separates the number of nodes per cluster.
        """
        if not isinstance(self.input.param("nodes_init", 1), int):
            self.nodes_init = [
                int(x) for x in
                self.input.param("nodes_init", 1).split("|")]
        else:
            self.nodes_init = [self.input.param("nodes_init", 1)]

        if self._testMethodDoc:
            self.log.info("Starting Test: %s - %s"
                          % (self._testMethodName, self._testMethodDoc))
        else:
            self.log.info("Starting Test: %s" % self._testMethodName)

        """
        Parameterized support for multiple clusters, instead of creating
        multiple clusters from the ini file.
        """
        self.num_of_clusters = self.input.param('num_of_clusters', 1)

        """
        Since BaseTestCase will initialize at least one cluster, we need
        to modify the initialized cluster's server property to correctly
        reflect the servers in that cluster.
        """
        start = 0
        end = self.nodes_init[0]
        cluster = self.cb_clusters[self.cb_clusters.keys()[0]]
        cluster.servers = self.servers[start:end]
        if "cbas" in cluster.master.services:
            cluster.cbas_nodes.append(cluster.master)

        """
        Since BaseTestCase will initialize at least one cluster, we need
        to initialize only (total clusters required - 1) more clusters.
        """
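        # Worked example (hypothetical param values): with
        #     services_init="kv:n1ql-kv|cbas-cbas" and nodes_init="2|2",
        # self.services_init parses to [["kv:n1ql", "kv"], ["cbas", "cbas"]]
        # and self.nodes_init to [2, 2]; the already-initialized cluster
        # keeps self.servers[0:2] and the loop below carves out
        # self.servers[2:4] for cluster "C2".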
""" cluster_name_format = "C%s" for i in range(1, self.num_of_clusters): start = end end += self.nodes_init[i] cluster_name = cluster_name_format % str(i + 1) cluster = CBCluster(name=cluster_name, servers=self.servers[start:end]) self.cb_clusters[cluster_name] = cluster cluster.nodes_in_cluster.append(cluster.master) cluster.kv_nodes.append(cluster.master) self.initialize_cluster(cluster_name, cluster, services=self.services_init[i][0]) cluster.master.services = self.services_init[i][0].replace( ":", ",") if "cbas" in cluster.master.services: cluster.cbas_nodes.append(cluster.master) if self.input.param("cluster_ip_family", ""): # Enforce IPv4 or IPv6 or both if cluster_ip_family[i] == "ipv4_only": status, msg = self.cluster_util.enable_disable_ip_address_family_type( cluster, True, True, False) if cluster_ip_family[i] == "ipv6_only": status, msg = self.cluster_util.enable_disable_ip_address_family_type( cluster, True, False, True) if cluster_ip_family[i] == "ipv4_ipv6": status, msg = self.cluster_util.enable_disable_ip_address_family_type( cluster, True, True, True) if not status: self.fail(msg) self.modify_cluster_settings(cluster) self.available_servers = self.servers[end:] """ KV infra to be created per cluster. Accepted values are - bkt_spec : will create KV infra based on bucket spec. bucket_spec param needs to be passed. default : will create a bucket named default on the cluster. None : no buckets will be created on cluster | -> separates number of nodes per cluster. """ if self.input.param("cluster_kv_infra", None): self.cluster_kv_infra = self.input.param("cluster_kv_infra", None).split("|") if len(self.cluster_kv_infra) < self.num_of_clusters: self.cluster_kv_infra.extend( [None] * (self.num_of_clusters - len(self.cluster_kv_infra))) else: self.cluster_kv_infra = [None] * self.num_of_clusters # Common properties self.num_concurrent_queries = self.input.param('num_queries', 5000) self.concurrent_batch_size = self.input.param('concurrent_batch_size', 100) self.index_fields = self.input.param('index_fields', None) if self.index_fields: self.index_fields = self.index_fields.split("-") self.retry_time = self.input.param("retry_time", 300) self.num_retries = self.input.param("num_retries", 1) self.cbas_spec_name = self.input.param("cbas_spec", None) self.expected_error = self.input.param("error", None) self.bucket_spec = self.input.param("bucket_spec", "analytics.default") self.doc_spec_name = self.input.param("doc_spec_name", "initial_load") self.set_default_cbas_memory = self.input.param( 'set_default_cbas_memory', False) self.cbas_memory_quota_percent = int( self.input.param("cbas_memory_quota_percent", 100)) self.bucket_size = self.input.param("bucket_size", 250) self.cbas_util = CbasUtil(self.task) self.service_mem_dict = { "kv": [ CbServer.Settings.KV_MEM_QUOTA, CbServer.Settings.MinRAMQuota.KV, 0 ], "fts": [ CbServer.Settings.FTS_MEM_QUOTA, CbServer.Settings.MinRAMQuota.FTS, 0 ], "index": [ CbServer.Settings.INDEX_MEM_QUOTA, CbServer.Settings.MinRAMQuota.INDEX, 0 ], "cbas": [ CbServer.Settings.CBAS_MEM_QUOTA, CbServer.Settings.MinRAMQuota.CBAS, 0 ], } # Add nodes to the cluster as per node_init param. 
        # Add nodes to the cluster as per the nodes_init param
        for i, (cluster_name, cluster) in enumerate(self.cb_clusters.items()):
            cluster.rest = RestConnection(cluster.master)
            cluster_services = self.cluster_util.get_services_map(cluster)
            cluster_info = cluster.rest.get_nodes_self()
            for service in cluster_services:
                if service != "n1ql":
                    property_name = self.service_mem_dict[service][0]
                    service_mem_in_cluster = cluster_info.__getattribute__(
                        property_name)
                    self.service_mem_dict[service][2] = \
                        service_mem_in_cluster

            j = 1
            for server in cluster.servers:
                if server.ip != cluster.master.ip:
                    server.services = \
                        self.services_init[i][j].replace(":", ",")
                    j += 1
                    if "cbas" in server.services:
                        cluster.cbas_nodes.append(server)
                    if "kv" in server.services:
                        cluster.kv_nodes.append(server)
                    rest = RestConnection(server)
                    rest.set_data_path(data_path=server.data_path,
                                       index_path=server.index_path,
                                       cbas_path=server.cbas_path)
                    if self.set_default_cbas_memory:
                        self.log.info(
                            "Setting the min possible memory quota so that "
                            "adding more nodes to the cluster wouldn't be "
                            "a problem.")
                        cluster.rest.set_service_mem_quota({
                            CbServer.Settings.KV_MEM_QUOTA:
                                CbServer.Settings.MinRAMQuota.KV,
                            CbServer.Settings.FTS_MEM_QUOTA:
                                CbServer.Settings.MinRAMQuota.FTS,
                            CbServer.Settings.INDEX_MEM_QUOTA:
                                CbServer.Settings.MinRAMQuota.INDEX})
                        self.log.info(
                            "Setting %d memory quota for CBAS"
                            % CbServer.Settings.MinRAMQuota.CBAS)
                        cluster.cbas_memory_quota = \
                            CbServer.Settings.MinRAMQuota.CBAS
                        cluster.rest.set_service_mem_quota({
                            CbServer.Settings.CBAS_MEM_QUOTA:
                                CbServer.Settings.MinRAMQuota.CBAS})
                    else:
                        self.set_memory_for_services(cluster, server,
                                                     server.services)

            if cluster.servers[1:]:
                self.task.rebalance(
                    cluster, cluster.servers[1:], [],
                    services=[server.services
                              for server in cluster.servers[1:]])

            if cluster.cbas_nodes:
                cbas_cc_node_ip = None
                retry = 0
                while retry < 60:
                    cbas_cc_node_ip = \
                        self.cbas_util.retrieve_cc_ip_from_master(cluster)
                    if cbas_cc_node_ip:
                        break
                    else:
                        self.sleep(10, "Waiting for CBAS service to come up")
                        retry += 1
                if not cbas_cc_node_ip:
                    self.fail("CBAS service did not come up even after "
                              "10 mins.")
                for server in cluster.cbas_nodes:
                    if server.ip == cbas_cc_node_ip:
                        cluster.cbas_cc_node = server
                        break

            if "cbas" in cluster.master.services:
                self.cbas_util.cleanup_cbas(cluster)
            cluster.otpNodes = cluster.rest.node_statuses()

            # Wait for analytics service to be up
            if hasattr(cluster, "cbas_cc_node"):
                if not self.cbas_util.is_analytics_running(cluster):
                    self.fail("Analytics service did not come up even "
                              "after 10 mins of wait after initialisation")

            if self.input.param("n2n_encryption", False):
                self.security_util = SecurityUtils(self.log)
                rest = RestConnection(cluster.master)

                self.log.info("Disabling Auto-Failover")
                if not rest.update_autofailover_settings(False, 120):
                    self.fail("Disabling Auto-Failover failed")

                self.log.info("Setting node to node encryption level to all")
                self.security_util.set_n2n_encryption_level_on_nodes(
                    cluster.nodes_in_cluster,
                    level=self.input.param("n2n_encryption_level",
                                           "control"))
                CbServer.use_https = True

                self.log.info("Enabling Auto-Failover")
                if not rest.update_autofailover_settings(True, 300):
                    self.fail("Enabling Auto-Failover failed")

            if self.input.param("analytics_loggers", None):
                """
                This flag is used for setting analytics' internal log
                levels. These logs are helpful while debugging issues, as
                they provide a deeper insight into the working of the
                CBAS service. The flag can set one or more analytics
                loggers: logger_name_1:level-logger_name_2:level-...
                """
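                # For example (logger names purely illustrative), passing
                # analytics_loggers=
                #     "org.apache.asterix:DEBUG-org.apache.hyracks:INFO"
                # parses below into
                #     {"org.apache.asterix": "DEBUG",
                #      "org.apache.hyracks": "INFO"}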
""" cbas_loggers = self.input.param("analytics_loggers", None).split("-") log_level_dict = dict() for logger in cbas_loggers: tmp = logger.split(":") log_level_dict[tmp[0]] = tmp[1] self.log.info("Setting following log levels for analytics - " "{0}".format(log_level_dict)) status, content, response = self.cbas_util.set_log_level_on_cbas( self.cluster, log_level_dict, timeout=120) if not status: self.fail("Error while setting log level for CBAS - " "{0}".format(content)) self.log.info("Verifying whether log levels set successfully") status, content, response = self.cbas_util.get_log_level_on_cbas( self.cluster) match_counter = 0 if status: actual_log_levels = content["loggers"] for logger in actual_log_levels: if (logger["name"] in log_level_dict) and \ logger["level"] == log_level_dict[logger["name"]]: match_counter += 1 if match_counter == len(log_level_dict): self.log.info("All log levels were set successfully") else: self.fail("Some log levels were not set") else: self.fail("Error while fetching log levels") self.disk_optimized_thread_settings = self.input.param( "disk_optimized_thread_settings", False) if self.disk_optimized_thread_settings: self.set_num_writer_and_reader_threads( cluster, num_writer_threads="disk_io_optimized", num_reader_threads="disk_io_optimized") if self.cluster_kv_infra[i] == "bkt_spec": if self.bucket_spec is not None: try: self.collectionSetUp(cluster) except Java_base_exception as exception: self.handle_setup_exception(exception) except Exception as exception: self.handle_setup_exception(exception) else: self.fail("Error : bucket_spec param needed") elif self.cluster_kv_infra[i] == "default": self.bucket_util.create_default_bucket( cluster, bucket_type=self.bucket_type, ram_quota=self.bucket_size, replica=self.num_replicas, conflict_resolution=self.bucket_conflict_resolution_type, replica_index=self.bucket_replica_index, storage=self.bucket_storage, eviction_policy=self.bucket_eviction_policy, flush_enabled=self.flush_enabled) self.bucket_util.add_rbac_user(cluster.master) self.log.info( "=== CBAS_BASE setup was finished for test #{0} {1} ===".format( self.case_number, self._testMethodName))