def create_required_buckets(self):
    self.log.info("Get the available memory quota")
    bucket_util = bucket_utils(self.master)
    self.info = bucket_util.rest.get_nodes_self()
    threshold_memory = 1024
    total_memory_in_mb = self.info.memoryFree / 1024 ** 2
    total_available_memory_in_mb = total_memory_in_mb
    active_service = self.info.services

    # Subtract the quota of every non-KV service that is enabled
    if "index" in active_service:
        total_available_memory_in_mb -= self.info.indexMemoryQuota
    if "fts" in active_service:
        total_available_memory_in_mb -= self.info.ftsMemoryQuota
    if "cbas" in active_service:
        total_available_memory_in_mb -= self.info.cbasMemoryQuota
    if "eventing" in active_service:
        total_available_memory_in_mb -= self.info.eventingMemoryQuota

    available_memory = total_available_memory_in_mb - threshold_memory
    self.rest.set_service_mem_quota({
        CbServer.Settings.KV_MEM_QUOTA: available_memory,
        CbServer.Settings.CBAS_MEM_QUOTA: available_memory - 1024,
        CbServer.Settings.INDEX_MEM_QUOTA: available_memory - 1024})

    self.log.info("Create CB buckets")
    bucket_names = ["GleambookUsers", "GleambookMessages", "ChirpMessages"]
    for bucket_name in bucket_names:
        self.create_bucket(self.master, bucket_name,
                           bucket_ram=available_memory / 3)

    # Lower the cursor-dropping watermarks on each bucket via diag/eval
    shell = RemoteMachineShellConnection(self.master)
    for bucket_name in bucket_names:
        command = 'curl -i -u Administrator:password --data ' \
                  '\'ns_bucket:update_bucket_props("%s", ' \
                  '[{extra_config_string, ' \
                  '"cursor_dropping_upper_mark=70;' \
                  'cursor_dropping_lower_mark=50"}]).\' ' \
                  'http://%s:8091/diag/eval' % (bucket_name, self.master.ip)
        shell.execute_command(command)
    shell.disconnect()

    # Create a primary index on each bucket and verify the query succeeded
    for bucket_name in bucket_names:
        result = RestConnection(self.query_node).query_tool(
            "CREATE PRIMARY INDEX idx_%s ON %s;" % (bucket_name, bucket_name))
        self.sleep(10, "wait for index creation.")
        self.assertTrue(result['status'] == "success")
def create_required_buckets(self):
    self.log.info("Get the available memory quota")
    bucket_util = bucket_utils(self.master)
    self.info = bucket_util.rest.get_nodes_self()
    threshold_memory = 1024
    total_memory_in_mb = self.info.memoryFree / 1024 ** 2
    total_available_memory_in_mb = total_memory_in_mb
    active_service = self.info.services

    # Subtract the quota of every non-KV service that is enabled
    if "index" in active_service:
        total_available_memory_in_mb -= self.info.indexMemoryQuota
    if "fts" in active_service:
        total_available_memory_in_mb -= self.info.ftsMemoryQuota
    if "cbas" in active_service:
        total_available_memory_in_mb -= self.info.cbasMemoryQuota
    if "eventing" in active_service:
        total_available_memory_in_mb -= self.info.eventingMemoryQuota
    self.log.info("Total free memory in MB: %s" % total_memory_in_mb)

    available_memory = total_available_memory_in_mb - threshold_memory
    self.rest.set_service_memoryQuota(service='memoryQuota',
                                      memoryQuota=available_memory)
    self.rest.set_service_memoryQuota(service='cbasMemoryQuota',
                                      memoryQuota=available_memory - 1024)
    self.rest.set_service_memoryQuota(service='indexMemoryQuota',
                                      memoryQuota=available_memory - 1024)

    self.log.info("Create CB buckets")
    self.create_bucket(self.master, "GleambookUsers",
                       bucket_ram=available_memory, replica=0)
    self.sleep(30, "wait for bucket warmup to complete.")
    result = RestConnection(self.query_node).query_tool(
        "CREATE PRIMARY INDEX idx_GleambookUsers ON GleambookUsers;")
    self.sleep(10, "wait for index creation.")
    self.assertTrue(result['status'] == "success")
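# Both create_required_buckets() variants above repeat the same quota
# arithmetic: start from the node's free memory, subtract the quota of every
# non-KV service, then keep a safety margin. Below is a minimal sketch of
# that calculation as a standalone helper; the helper name and its default
# threshold are illustrative assumptions, not part of the original code.
def compute_available_kv_memory_mb(node_info, threshold_mb=1024):
    """Return the MB left for the KV service on a node, given the node
    info object returned by rest.get_nodes_self()."""
    available_mb = node_info.memoryFree / 1024 ** 2
    service_quota_attrs = {"index": "indexMemoryQuota",
                           "fts": "ftsMemoryQuota",
                           "cbas": "cbasMemoryQuota",
                           "eventing": "eventingMemoryQuota"}
    for service, attr in service_quota_attrs.items():
        if service in node_info.services:
            available_mb -= getattr(node_info, attr)
    # keep a margin so ns_server itself is never starved of memory
    return available_mb - threshold_mb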
def test_cbas_queries_in_parallel_with_data_ingestion_on_multiple_cb_buckets(self):
    '''
    -i b/resources/4-nodes-template.ini -t cbas.cbas_bug_automation.CBASBugAutomation.test_cbas_queries_in_parallel_with_data_ingestion_on_multiple_cb_buckets,
    default_bucket=False,num_of_cb_buckets=4,items=1000,minutes_to_run=1
    '''
    self.log.info("Get the available memory quota")
    bucket_util = bucket_utils(self.master)
    self.info = bucket_util.rest.get_nodes_self()
    threshold_memory = 1024
    total_memory_in_mb = self.info.memoryTotal / 1024 ** 2
    total_available_memory_in_mb = total_memory_in_mb
    active_service = self.info.services

    # Subtract the quota of every non-KV service that is enabled
    if "index" in active_service:
        total_available_memory_in_mb -= self.info.indexMemoryQuota
    if "fts" in active_service:
        total_available_memory_in_mb -= self.info.ftsMemoryQuota
    if "cbas" in active_service:
        total_available_memory_in_mb -= self.info.cbasMemoryQuota
    if "eventing" in active_service:
        total_available_memory_in_mb -= self.info.eventingMemoryQuota
    self.log.info("Total memory in MB: %s" % total_memory_in_mb)

    available_memory = total_available_memory_in_mb - threshold_memory
    self.rest.set_service_memoryQuota(service='memoryQuota',
                                      memoryQuota=available_memory)

    self.log.info("Add a KV node")
    result = self.add_node(self.servers[1], services=["kv"],
                           rebalance=False)
    self.assertTrue(result, msg="Failed to add KV node.")

    self.log.info("Add a CBAS node")
    result = self.add_node(self.cbas_servers[0], services=["cbas"],
                           rebalance=True)
    self.assertTrue(result, msg="Failed to add CBAS node.")

    self.log.info("Create CB buckets")
    num_of_cb_buckets = self.input.param("num_of_cb_buckets", 4)
    for i in range(num_of_cb_buckets):
        self.create_bucket(self.master, "default" + str(i),
                           bucket_ram=(available_memory / num_of_cb_buckets))

    self.log.info("Create connections for CB buckets")
    for i in range(num_of_cb_buckets):
        self.cbas_util.createConn("default" + str(i))

    self.log.info("Create CBAS buckets")
    for i in range(num_of_cb_buckets):
        self.cbas_util.create_bucket_on_cbas(
            cbas_bucket_name="cbas_default" + str(i),
            cb_bucket_name="default" + str(i))

    self.log.info("Create data-sets")
    for i in range(num_of_cb_buckets):
        self.cbas_util.create_dataset_on_bucket(
            cbas_bucket_name="default" + str(i),
            cbas_dataset_name="cbas_default_ds" + str(i))

    self.log.info("Connect to CBAS buckets")
    for i in range(num_of_cb_buckets):
        result = self.cbas_util.connect_to_bucket(
            cbas_bucket_name="cbas_default" + str(i),
            cb_bucket_password=self.cb_bucket_password)
        self.assertTrue(result, msg="Failed to connect cbas bucket")

    self.log.info("Generate documents")
    num_of_documents_per_insert_update = self.input.param("items", 1000)
    load_gen = CBASBugAutomation.generate_documents(
        0, num_of_documents_per_insert_update)

    self.log.info("Asynchronously insert documents in CB buckets")
    tasks = self._async_load_all_buckets(server=self.master,
                                         kv_gen=load_gen,
                                         op_type="create", exp=0,
                                         batch_size=100)
    for task in tasks:
        self.log.info(task.get_result())

    self.log.info("Asynchronously create/update documents in CB buckets")
    start_insert_update_from = num_of_documents_per_insert_update
    end_insert_update_at = \
        start_insert_update_from + num_of_documents_per_insert_update
    minutes_to_run = self.input.param("minutes_to_run", 5)
    end_time = datetime.datetime.now() + \
        datetime.timedelta(minutes=int(minutes_to_run))
    while datetime.datetime.now() < end_time:
        try:
            self.log.info("start creation of new documents")
            load_gen = CBASBugAutomation.generate_documents(
                start_insert_update_from, end_insert_update_at)
            tasks = self._async_load_all_buckets(
                server=self.master, kv_gen=load_gen,
                op_type="create", exp=0, batch_size=100)
            for task in tasks:
                self.log.info(task.get_result())

            self.log.info("start updating documents created in the "
                          "last iteration")
            load_previous_iteration_gen = \
                CBASBugAutomation.generate_documents(
                    start_insert_update_from
                    - num_of_documents_per_insert_update,
                    end_insert_update_at
                    - num_of_documents_per_insert_update)
            tasks = self._async_load_all_buckets(
                server=self.master, kv_gen=load_previous_iteration_gen,
                op_type="update", exp=0, batch_size=100)
            for task in tasks:
                self.log.info(task.get_result())

            start_insert_update_from = end_insert_update_at
            end_insert_update_at += num_of_documents_per_insert_update
        except Exception as e:
            # keep the load loop running even if one iteration fails
            self.log.info(str(e))

    # Fire analytics queries against every dataset while ingestion settles
    for i in range(num_of_cb_buckets):
        try:
            self.cbas_util.execute_statement_on_cbas_util(
                'select count(*) from `%s`' % ("cbas_default_ds" + str(i)))
            self.cbas_util.execute_statement_on_cbas_util(
                'select * from `%s`' % ("cbas_default_ds" + str(i)))
        except Exception as e:
            self.log.info(str(e))

    self.log.info("Assert document count in CBAS dataset")
    for i in range(num_of_cb_buckets):
        count_n1ql = self.rest.query_tool(
            'select count(*) from `%s`'
            % ("default" + str(i)))['results'][0]['$1']
        result = self.cbas_util.validate_cbas_dataset_items_count(
            dataset_name="cbas_default_ds" + str(i),
            expected_count=count_n1ql,
            expected_mutated_count=count_n1ql
            - num_of_documents_per_insert_update)
        self.assertTrue(result, msg="CBAS dataset item count mismatch")
def setUp(self):
    self.failover_util = failover_utils()
    self.node_util = node_utils()
    self.views_util = views_utils()
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.primary_index_created = False
    self.use_sdk_client = self.input.param("use_sdk_client", False)
    self.analytics = self.input.param("analytics", False)
    if self.input.param("log_level", None):
        log.setLevel(level=0)
        for hd in log.handlers:
            if str(hd.__class__).find('FileHandler') != -1:
                hd.setLevel(level=logging.DEBUG)
            else:
                hd.setLevel(level=getattr(
                    logging, self.input.param("log_level", None)))
    self.servers = self.input.servers
    if str(self.__class__).find('moxitests') != -1:
        self.moxi_server = self.input.moxis[0]
        self.servers = [server for server in self.servers
                        if server.ip != self.moxi_server.ip]
    self.buckets = []
    self.bucket_base_params = {}
    self.bucket_base_params['membase'] = {}
    self.master = self.servers[0]
    self.bucket_util = bucket_utils(self.master)
    self.cluster_util = cluster_utils(self.master)
    self.indexManager = self.servers[0]
    if not hasattr(self, 'cluster'):
        self.cluster = Cluster()
    self.pre_warmup_stats = {}
    self.cleanup = False
    self.nonroot = False
    shell = RemoteMachineShellConnection(self.master)
    self.os_info = shell.extract_remote_info().type.lower()
    if self.os_info != 'windows':
        if self.master.ssh_username != "root":
            self.nonroot = True
    shell.disconnect()
    """ Some tests need to bypass checking the cb server at setup
        to run installation """
    self.skip_init_check_cbserver = \
        self.input.param("skip_init_check_cbserver", False)
    self.data_collector = DataCollector()
    self.data_analyzer = DataAnalyzer()
    self.result_analyzer = DataAnalysisResultAnalyzer()
    # self.set_testrunner_client()
    self.change_bucket_properties = False
    self.cbas_node = self.input.cbas
    self.cbas_servers = []
    self.kv_servers = []
    self.otpNodes = []
    for server in self.servers:
        if "cbas" in server.services:
            self.cbas_servers.append(server)
        if "kv" in server.services:
            self.kv_servers.append(server)
    if not self.cbas_node and len(self.cbas_servers) >= 1:
        self.cbas_node = self.cbas_servers[0]
    try:
        self.skip_setup_cleanup = self.input.param("skip_setup_cleanup",
                                                   False)
        self.vbuckets = self.input.param("vbuckets", 1024)
        self.upr = self.input.param("upr", None)
        self.index_quota_percent = self.input.param(
            "index_quota_percent", None)
        self.targetIndexManager = self.input.param("targetIndexManager",
                                                   False)
        self.targetMaster = self.input.param("targetMaster", False)
        self.reset_services = self.input.param("reset_services", False)
        self.auth_mech = self.input.param("auth_mech", "PLAIN")
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # number of cases run from testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        self.parallelism = self.input.param("parallelism", False)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.num_buckets = self.input.param("num_buckets", 0)
        self.verify_unacked_bytes = self.input.param(
            "verify_unacked_bytes", False)
        self.memcached_buckets = self.input.param("memcached_buckets", 0)
        self.enable_flow_control = self.input.param(
            "enable_flow_control", False)
        self.total_buckets = self.sasl_buckets + self.default_bucket \
            + self.standard_buckets + self.memcached_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        # initial number of items in the cluster
        self.nodes_init = self.input.param("nodes_init", 1)
        self.nodes_in = self.input.param("nodes_in", 1)
        self.nodes_out = self.input.param("nodes_out", 1)
        self.services_init = self.input.param("services_init", None)
        self.services_in = self.input.param("services_in", None)
        self.forceEject = self.input.param("forceEject", False)
        self.force_kill_memcached = TestInputSingleton.input.param(
            'force_kill_memcached', False)
        self.num_items = self.input.param("items", 1000)
        self.value_size = self.input.param("value_size", 512)
        self.dgm_run = self.input.param("dgm_run", False)
        self.active_resident_threshold = int(
            self.input.param("active_resident_threshold", 0))
        # max number of items to verify in ValidateDataTask;
        # None - verify all
        self.max_verify = self.input.param("max_verify", None)
        # we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.rebalanceIndexWaitingDisabled = self.input.param(
            "rebalanceIndexWaitingDisabled", None)
        self.rebalanceIndexPausingDisabled = self.input.param(
            "rebalanceIndexPausingDisabled", None)
        self.maxParallelIndexers = self.input.param(
            "maxParallelIndexers", None)
        self.maxParallelReplicaIndexers = self.input.param(
            "maxParallelReplicaIndexers", None)
        self.quota_percent = self.input.param("quota_percent", None)
        self.port = None
        self.log_message = self.input.param("log_message", None)
        self.log_info = self.input.param("log_info", None)
        self.log_location = self.input.param("log_location", None)
        self.stat_info = self.input.param("stat_info", None)
        self.port_info = self.input.param("port_info", None)
        if not hasattr(self, 'skip_buckets_handle'):
            self.skip_buckets_handle = self.input.param(
                "skip_buckets_handle", False)
        self.nodes_out_dist = self.input.param("nodes_out_dist", None)
        self.absolute_path = self.input.param("absolute_path", True)
        # kill a hung test and jump to the next one
        self.test_timeout = self.input.param("test_timeout", 3600)
        self.enable_bloom_filter = self.input.param(
            "enable_bloom_filter", False)
        self.enable_time_sync = self.input.param("enable_time_sync", False)
        self.gsi_type = self.input.param("gsi_type", 'plasma')
        # bucket parameters go here
        self.bucket_size = self.input.param("bucket_size", None)
        self.bucket_type = self.input.param("bucket_type", 'membase')
        self.num_replicas = self.input.param("replicas", 1)
        self.enable_replica_index = self.input.param("index_replicas", 1)
        self.eviction_policy = self.input.param(
            "eviction_policy", 'valueOnly')  # or 'fullEviction'
        # for an ephemeral bucket it can be noEviction or nruEviction
        if self.bucket_type == 'ephemeral' \
                and self.eviction_policy == 'valueOnly':
            # use the ephemeral bucket default
            self.eviction_policy = 'noEviction'
        self.sasl_password = self.input.param("sasl_password", 'password')
        # only applies to LWW but is here because the bucket is created here
        self.lww = self.input.param("lww", False)
        self.maxttl = self.input.param("maxttl", None)
        self.compression_mode = self.input.param("compression_mode",
                                                 'passive')
        self.sdk_compression = self.input.param("sdk_compression", True)
        self.sasl_bucket_name = "bucket"
        self.sasl_bucket_priority = self.input.param(
            "sasl_bucket_priority", None)
        self.standard_bucket_priority = self.input.param(
            "standard_bucket_priority", None)
        # end of bucket parameters spot (this is ongoing)

        if self.skip_setup_cleanup:
            self.buckets = BucketHelper(self.master).get_buckets()
            return
        if not self.skip_init_check_cbserver:
            self.cb_version = None
            if RestHelper(RestConnection(
                    self.master)).is_ns_server_running():
                """ Every new couchbase version brings new features, and
                    test code for them will not work on previous releases,
                    so we need the couchbase version to filter out those
                    tests. """
                self.cb_version = RestConnection(
                    self.master).get_nodes_version()
            else:
                log.info("couchbase server does not run yet")
            self.protocol = self.get_protocol_type()
        self.services_map = None
        if self.sasl_bucket_priority is not None:
            self.sasl_bucket_priority = \
                self.sasl_bucket_priority.split(":")
        if self.standard_bucket_priority is not None:
            self.standard_bucket_priority = \
                self.standard_bucket_priority.split(":")

        log.info("============== basetestcase setup was started for test #{0} {1}==============" \
                 .format(self.case_number, self._testMethodName))
        if not self.skip_buckets_handle \
                and not self.skip_init_check_cbserver:
            self._cluster_cleanup()

        shared_params = self._create_bucket_params(
            server=self.master, size=self.bucket_size,
            replicas=self.num_replicas,
            enable_replica_index=self.enable_replica_index,
            eviction_policy=self.eviction_policy, bucket_priority=None,
            lww=self.lww, maxttl=self.maxttl,
            compression_mode=self.compression_mode)

        membase_params = copy.deepcopy(shared_params)
        membase_params['bucket_type'] = 'membase'
        self.bucket_base_params['membase']['non_ephemeral'] = \
            membase_params

        membase_ephemeral_params = copy.deepcopy(shared_params)
        membase_ephemeral_params['bucket_type'] = 'ephemeral'
        self.bucket_base_params['membase']['ephemeral'] = \
            membase_ephemeral_params

        memcached_params = copy.deepcopy(shared_params)
        memcached_params['bucket_type'] = 'memcached'
        self.bucket_base_params['memcached'] = memcached_params

        # avoid any cluster operations in setup for new upgrade
        # & upgradeXDCR tests
        if str(self.__class__).find('newupgradetests') != -1 or \
                str(self.__class__).find('upgradeXDCR') != -1 or \
                str(self.__class__).find('Upgrade_EpTests') != -1 or \
                hasattr(self, 'skip_buckets_handle') and \
                self.skip_buckets_handle:
            log.info("any cluster operation in setup will be skipped")
            self.primary_index_created = True
            log.info("============== basetestcase setup was finished for test #{0} {1} ==============" \
                     .format(self.case_number, self._testMethodName))
            return
        # avoid clean up if the previous test has been torn down
        if self.case_number == 1 or self.case_number > 1000:
            if self.case_number > 1000:
                log.warn("tearDown for previous test failed. will retry..")
                self.case_number -= 1000
            self.cleanup = True
            if not self.skip_init_check_cbserver:
                self.tearDownEverything()
                self.cluster = Cluster()
        if not self.skip_init_check_cbserver:
            log.info("initializing cluster")
            self.reset_cluster()
            master_services = self.get_services(self.servers[:1],
                                                self.services_init,
                                                start_node=0)
            if master_services is not None:
                master_services = master_services[0].split(",")
            self.quota = self._initialize_nodes(
                self.cluster, self.servers,
                self.disabled_consistent_view,
                self.rebalanceIndexWaitingDisabled,
                self.rebalanceIndexPausingDisabled,
                self.maxParallelIndexers,
                self.maxParallelReplicaIndexers,
                self.port, self.quota_percent,
                services=master_services)
            self.change_env_variables()
            self.change_checkpoint_params()
            # Add built-in user
            if not self.skip_init_check_cbserver:
                self.add_built_in_server_user(node=self.master)
            log.info("done initializing cluster")
        else:
            self.quota = ""
        if self.input.param("log_info", None):
            self.change_log_info()
        if self.input.param("log_location", None):
            self.change_log_location()
        if self.input.param("stat_info", None):
            self.change_stat_info()
        if self.input.param("port_info", None):
            self.change_port_info()
        if self.input.param("port", None):
            self.port = str(self.input.param("port", None))
        try:
            if (str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1) or \
                    (str(self.__class__).find('memorysanitytests.MemorySanity') != -1) or \
                    str(self.__class__).find('negativetests.NegativeTests') != -1 or \
                    str(self.__class__).find('warmuptest.WarmUpTests') != -1 or \
                    str(self.__class__).find('failover.failovertests.FailoverTests') != -1 or \
                    str(self.__class__).find('observe.observeseqnotests.ObserveSeqNoTests') != -1 or \
                    str(self.__class__).find('epengine.lwwepengine.LWW_EP_Engine') != -1:
                self.services = self.get_services(self.servers,
                                                  self.services_init)
                # rebalance all nodes into the cluster before each test
                self.cluster.rebalance(self.servers[:self.num_servers],
                                       self.servers[1:self.num_servers],
                                       [], services=self.services)
            elif self.nodes_init > 1 and not self.skip_init_check_cbserver:
                self.services = self.get_services(
                    self.servers[:self.nodes_init], self.services_init)
                self.cluster.rebalance(self.servers[:1],
                                       self.servers[1:self.nodes_init],
                                       [], services=self.services)
            elif str(self.__class__).find('ViewQueryTests') != -1 and \
                    not self.input.param("skip_rebalance", False):
                self.services = self.get_services(self.servers,
                                                  self.services_init)
                self.cluster.rebalance(self.servers, self.servers[1:],
                                       [], services=self.services)
            self.setDebugLevel(service_type="index")
        except BaseException, e:
            # increase case_number to retry tearDown in setup
            # for the next test
            self.case_number += 1000
            self.fail(e)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 10:
            log.info("================== changing max buckets from 10 to {0} =================" \
                     .format(self.total_buckets))
            self.change_max_buckets(self, self.total_buckets)
        if self.total_buckets > 0 and not self.skip_init_check_cbserver:
            """ Since Sherlock, the index service can take some RAM quota
                from the total RAM quota for couchbase server. We need to
                get the correct RAM quota available to create bucket(s)
                after all services were set. """
            node_info = RestConnection(self.master).get_nodes_self()
            if node_info.memoryQuota and int(node_info.memoryQuota) > 0:
                ram_available = node_info.memoryQuota
            else:
                ram_available = self.quota
            if self.bucket_size is None:
                if self.dgm_run:
                    # if dgm is set, we need to set bucket size
                    # to the dgm setting
                    self.bucket_size = self.quota
                else:
                    self.bucket_size = self._get_bucket_size(
                        ram_available, self.total_buckets)
            self.bucket_base_params['membase']['non_ephemeral']['size'] = \
                self.bucket_size
            self.bucket_base_params['membase']['ephemeral']['size'] = \
                self.bucket_size
            self.bucket_base_params['memcached']['size'] = self.bucket_size
        if str(self.__class__).find('upgrade_tests') == -1 and \
                str(self.__class__).find('newupgradetests') == -1:
            self._bucket_creation()
        log.info("============== basetestcase setup was finished for test #{0} {1} ==============" \
                 .format(self.case_number, self._testMethodName))
        if not self.skip_init_check_cbserver:
            self._log_start(self)
            self.sleep(10)
    except Exception, e:
        traceback.print_exc()
        self.fail(e)
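# _get_bucket_size() is called above but not shown in this section. Below is
# a minimal sketch of the usual even split of the RAM quota across buckets;
# the "_sketch" name and the 100 MB floor (Couchbase's minimum bucket quota)
# are illustrative assumptions, not the original helper.
def _get_bucket_size_sketch(ram_available_mb, total_buckets):
    # divide the quota evenly, never dropping below the minimum bucket size
    return max(100, int(ram_available_mb) // int(total_buckets))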
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.primary_index_created = False
    self.use_sdk_client = self.input.param("use_sdk_client", False)
    if self.input.param("log_level", None):
        log.setLevel(level=0)
        for hd in log.handlers:
            if str(hd.__class__).find('FileHandler') != -1:
                hd.setLevel(level=logging.DEBUG)
            else:
                hd.setLevel(level=getattr(
                    logging, self.input.param("log_level", None)))
    self.servers = self.input.servers
    self.buckets = []
    self.case_number = self.input.param("case_number", 0)
    self.thread_to_use = self.input.param("threads_to_use", 10)
    self.cluster = CBCluster(servers=self.input.servers)
    self.task_manager = TaskManager(self.thread_to_use)
    self.cluster_util = cluster_utils(self.cluster, self.task_manager)
    self.bucket_util = bucket_utils(self.cluster, self.task_manager,
                                    self.cluster_util)
    self.task = ServerTasks(self.task_manager)
    self.cleanup = False
    self.nonroot = False
    shell = RemoteMachineShellConnection(self.cluster.master)
    self.os_info = shell.extract_remote_info().type.lower()
    if self.os_info != 'windows':
        if self.cluster.master.ssh_username != "root":
            self.nonroot = True
    shell.disconnect()
    """ Some tests need to bypass checking the cb server at setup
        to run installation """
    self.skip_init_check_cbserver = self.input.param(
        "skip_init_check_cbserver", False)
    try:
        self.vbuckets = self.input.param("vbuckets", 1024)
        self.skip_setup_cleanup = self.input.param("skip_setup_cleanup",
                                                   False)
        self.index_quota_percent = self.input.param(
            "index_quota_percent", None)
        self.num_servers = self.input.param("servers",
                                            len(self.cluster.servers))
        # initial number of items in the cluster
        self.services_init = self.input.param("services_init", None)
        self.nodes_init = self.input.param("nodes_init", 1)
        self.nodes_in = self.input.param("nodes_in", 1)
        self.nodes_out = self.input.param("nodes_out", 1)
        self.services_in = self.input.param("services_in", None)
        self.forceEject = self.input.param("forceEject", False)
        self.num_items = self.input.param("num_items", 100000)
        self.num_replicas = self.input.param("replicas", 1)
        self.value_size = self.input.param("value_size", 1)
        self.wait_timeout = self.input.param("wait_timeout", 60)
        self.dgm_run = self.input.param("dgm_run", False)
        self.active_resident_threshold = int(
            self.input.param("active_resident_threshold", 0))
        self.verify_unacked_bytes = self.input.param(
            "verify_unacked_bytes", False)
        self.force_kill_memcached = TestInputSingleton.input.param(
            'force_kill_memcached', False)
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.rebalanceIndexWaitingDisabled = self.input.param(
            "rebalanceIndexWaitingDisabled", None)
        self.rebalanceIndexPausingDisabled = self.input.param(
            "rebalanceIndexPausingDisabled", None)
        self.maxParallelIndexers = self.input.param(
            "maxParallelIndexers", None)
        self.maxParallelReplicaIndexers = self.input.param(
            "maxParallelReplicaIndexers", None)
        self.quota_percent = self.input.param("quota_percent", None)
        self.port = None
        self.log_info = self.input.param("log_info", None)
        self.log_location = self.input.param("log_location", None)
        self.stat_info = self.input.param("stat_info", None)
        self.port_info = self.input.param("port_info", None)
        if not hasattr(self, 'skip_buckets_handle'):
            self.skip_buckets_handle = self.input.param(
                "skip_buckets_handle", False)
        # kill a hung test and jump to the next one
        self.test_timeout = self.input.param("test_timeout", 3600)
        self.gsi_type = self.input.param("gsi_type", 'plasma')
        self.compression_mode = self.input.param("compression_mode",
                                                 'passive')
        self.sdk_compression = self.input.param("sdk_compression", True)
        self.replicate_to = self.input.param("replicate_to", 0)
        self.persist_to = self.input.param("persist_to", 0)
        # jre-path for cbas
        self.jre_path = self.input.param("jre_path", None)
        # end of bucket parameters spot (this is ongoing)

        if self.skip_setup_cleanup:
            self.buckets = self.bucket_util.get_all_buckets()
            return
        if not self.skip_init_check_cbserver:
            self.cb_version = None
            if RestHelper(RestConnection(
                    self.cluster.master)).is_ns_server_running():
                """ Every new couchbase version brings new features, and
                    test code for them will not work on previous releases,
                    so we need the couchbase version to filter out those
                    tests. """
                self.cb_version = RestConnection(
                    self.cluster.master).get_nodes_version()
            else:
                log.info("couchbase server does not run yet")
            self.protocol = self.cluster_util.get_protocol_type()
        self.services_map = None

        log.info("============== basetestcase setup was started for test #{0} {1}==============" \
                 .format(self.case_number, self._testMethodName))
        if not self.skip_buckets_handle \
                and not self.skip_init_check_cbserver:
            self.cluster_util.cluster_cleanup(self.bucket_util)
        # avoid any cluster operations in setup for new upgrade
        # & upgradeXDCR tests
        if str(self.__class__).find('newupgradetests') != -1 or \
                str(self.__class__).find('upgradeXDCR') != -1 or \
                str(self.__class__).find('Upgrade_EpTests') != -1 or \
                hasattr(self, 'skip_buckets_handle') and \
                self.skip_buckets_handle:
            log.info("any cluster operation in setup will be skipped")
            self.primary_index_created = True
            log.info("============== basetestcase setup was finished for test #{0} {1} ==============" \
                     .format(self.case_number, self._testMethodName))
            return
        # avoid clean up if the previous test has been torn down
        if self.case_number == 1 or self.case_number > 1000:
            if self.case_number > 1000:
                log.warn("tearDown for previous test failed. will retry..")
                self.case_number -= 1000
            self.cleanup = True
            if not self.skip_init_check_cbserver:
                self.tearDownEverything()
                self.task = ServerTasks(self.task_manager)
        if not self.skip_init_check_cbserver:
            log.info("initializing cluster")
            # self.cluster_util.reset_cluster()
            master_services = self.cluster_util.get_services(
                self.servers[:1], self.services_init, start_node=0)
            if master_services is not None:
                master_services = master_services[0].split(",")
            self.quota = self._initialize_nodes(
                self.task, self.cluster.servers,
                self.disabled_consistent_view,
                self.rebalanceIndexWaitingDisabled,
                self.rebalanceIndexPausingDisabled,
                self.maxParallelIndexers,
                self.maxParallelReplicaIndexers,
                self.port, self.quota_percent,
                services=master_services)
            self.cluster_util.change_env_variables()
            self.cluster_util.change_checkpoint_params()
            log.info("done initializing cluster")
        else:
            self.quota = ""
        if self.input.param("log_info", None):
            self.cluster_util.change_log_info()
        if self.input.param("log_location", None):
            self.cluster_util.change_log_location()
        if self.input.param("stat_info", None):
            self.cluster_util.change_stat_info()
        if self.input.param("port_info", None):
            self.cluster_util.change_port_info()
        if self.input.param("port", None):
            self.port = str(self.input.param("port", None))
        log.info("============== basetestcase setup was finished for test #{0} {1} ==============" \
                 .format(self.case_number, self._testMethodName))
        if not self.skip_init_check_cbserver:
            self._log_start()
            self.sleep(5)
    except Exception, e:
        traceback.print_exc()
        self.task.shutdown(force=True)
        self.fail(e)
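# All three setUp() variants in this section share the same retry convention
# around tearDown: when setup's cluster operations fail, case_number is
# bumped by 1000 before failing the test; the next setUp sees the offset,
# restores the real number, and runs tearDownEverything() before continuing.
# Below is a minimal sketch of that handshake; `demo_setup` and the bare
# `state` object are illustrative stand-ins, not the original methods.
RETRY_OFFSET = 1000

def demo_setup(state):
    # detect that the previous test's tearDown never ran cleanly
    if state.case_number == 1 or state.case_number > RETRY_OFFSET:
        if state.case_number > RETRY_OFFSET:
            state.case_number -= RETRY_OFFSET  # restore the real number
        state.cleanup = True
        state.tearDownEverything()
    try:
        state.initialize_cluster()
    except Exception:
        # flag the failure so the next setUp retries the cleanup first
        state.case_number += RETRY_OFFSET
        raise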
def setUp(self):
    self.failover_util = failover_utils()
    self.node_util = node_utils()
    self.views_util = views_utils()
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.primary_index_created = False
    self.use_sdk_client = self.input.param("use_sdk_client", False)
    if self.input.param("log_level", None):
        log.setLevel(level=0)
        for hd in log.handlers:
            if str(hd.__class__).find('FileHandler') != -1:
                hd.setLevel(level=logging.DEBUG)
            else:
                hd.setLevel(level=getattr(
                    logging, self.input.param("log_level", None)))
    self.servers = self.input.servers
    if str(self.__class__).find('moxitests') != -1:
        self.moxi_server = self.input.moxis[0]
        self.servers = [server for server in self.servers
                        if server.ip != self.moxi_server.ip]
    self.buckets = []
    self.cluster = CBCluster(servers=self.input.servers)
    self.bucket_util = bucket_utils(self.cluster)
    self.cluster_util = cluster_utils(self.cluster)
    self.task = ServerTasks()
    self.pre_warmup_stats = {}
    self.cleanup = False
    self.nonroot = False
    shell = RemoteMachineShellConnection(self.cluster.master)
    self.os_info = shell.extract_remote_info().type.lower()
    if self.os_info != 'windows':
        if self.cluster.master.ssh_username != "root":
            self.nonroot = True
    shell.disconnect()
    """ Some tests need to bypass checking the cb server at setup
        to run installation """
    self.skip_init_check_cbserver = \
        self.input.param("skip_init_check_cbserver", False)
    self.data_collector = DataCollector()
    self.data_analyzer = DataAnalyzer()
    self.result_analyzer = DataAnalysisResultAnalyzer()
    try:
        self.skip_setup_cleanup = self.input.param("skip_setup_cleanup",
                                                   False)
        self.index_quota_percent = self.input.param(
            "index_quota_percent", None)
        self.targetIndexManager = self.input.param("targetIndexManager",
                                                   False)
        self.targetMaster = self.input.param("targetMaster", False)
        self.reset_services = self.input.param("reset_services", False)
        self.auth_mech = self.input.param("auth_mech", "PLAIN")
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # number of cases run from testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        self.parallelism = self.input.param("parallelism", False)
        self.verify_unacked_bytes = self.input.param(
            "verify_unacked_bytes", False)
        self.enable_flow_control = self.input.param(
            "enable_flow_control", False)
        self.num_servers = self.input.param("servers",
                                            len(self.cluster.servers))
        # initial number of items in the cluster
        self.nodes_init = self.input.param("nodes_init", 1)
        self.nodes_in = self.input.param("nodes_in", 1)
        self.nodes_out = self.input.param("nodes_out", 1)
        self.services_init = self.input.param("services_init", None)
        self.services_in = self.input.param("services_in", None)
        self.forceEject = self.input.param("forceEject", False)
        self.force_kill_memcached = TestInputSingleton.input.param(
            'force_kill_memcached', False)
        self.num_items = self.input.param("items", 1000)
        self.value_size = self.input.param("value_size", 512)
        self.dgm_run = self.input.param("dgm_run", False)
        self.active_resident_threshold = int(
            self.input.param("active_resident_threshold", 0))
        # max number of items to verify in ValidateDataTask;
        # None - verify all
        self.max_verify = self.input.param("max_verify", None)
        # we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.rebalanceIndexWaitingDisabled = self.input.param(
            "rebalanceIndexWaitingDisabled", None)
        self.rebalanceIndexPausingDisabled = self.input.param(
            "rebalanceIndexPausingDisabled", None)
        self.maxParallelIndexers = self.input.param(
            "maxParallelIndexers", None)
        self.maxParallelReplicaIndexers = self.input.param(
            "maxParallelReplicaIndexers", None)
        self.quota_percent = self.input.param("quota_percent", None)
        self.port = None
        self.log_message = self.input.param("log_message", None)
        self.log_info = self.input.param("log_info", None)
        self.log_location = self.input.param("log_location", None)
        self.stat_info = self.input.param("stat_info", None)
        self.port_info = self.input.param("port_info", None)
        if not hasattr(self, 'skip_buckets_handle'):
            self.skip_buckets_handle = self.input.param(
                "skip_buckets_handle", False)
        self.nodes_out_dist = self.input.param("nodes_out_dist", None)
        self.absolute_path = self.input.param("absolute_path", True)
        # kill a hung test and jump to the next one
        self.test_timeout = self.input.param("test_timeout", 3600)
        self.enable_bloom_filter = self.input.param(
            "enable_bloom_filter", False)
        # self.enable_time_sync = self.input.param("enable_time_sync",
        #                                          False)
        self.gsi_type = self.input.param("gsi_type", 'plasma')
        # jre-path for cbas
        self.jre_path = self.input.param("jre_path", None)
        # end of bucket parameters spot (this is ongoing)

        if self.skip_setup_cleanup:
            self.buckets = BucketHelper(self.cluster.master).get_buckets()
            return
        if not self.skip_init_check_cbserver:
            self.cb_version = None
            if RestHelper(RestConnection(
                    self.cluster.master)).is_ns_server_running():
                """ Every new couchbase version brings new features, and
                    test code for them will not work on previous releases,
                    so we need the couchbase version to filter out those
                    tests. """
                self.cb_version = RestConnection(
                    self.cluster.master).get_nodes_version()
            else:
                log.info("couchbase server does not run yet")
            self.protocol = self.cluster_util.get_protocol_type()
        self.services_map = None

        log.info("============== basetestcase setup was started for test #{0} {1}==============" \
                 .format(self.case_number, self._testMethodName))
        if not self.skip_buckets_handle \
                and not self.skip_init_check_cbserver:
            self.cluster_util._cluster_cleanup(self.bucket_util)
        # avoid any cluster operations in setup for new upgrade
        # & upgradeXDCR tests
        if str(self.__class__).find('newupgradetests') != -1 or \
                str(self.__class__).find('upgradeXDCR') != -1 or \
                str(self.__class__).find('Upgrade_EpTests') != -1 or \
                hasattr(self, 'skip_buckets_handle') and \
                self.skip_buckets_handle:
            log.info("any cluster operation in setup will be skipped")
            self.primary_index_created = True
            log.info("============== basetestcase setup was finished for test #{0} {1} ==============" \
                     .format(self.case_number, self._testMethodName))
            return
        # avoid clean up if the previous test has been torn down
        if self.case_number == 1 or self.case_number > 1000:
            if self.case_number > 1000:
                log.warn("tearDown for previous test failed. will retry..")
                self.case_number -= 1000
            self.cleanup = True
            if not self.skip_init_check_cbserver:
                self.tearDownEverything()
                self.task = ServerTasks()
        if not self.skip_init_check_cbserver:
            log.info("initializing cluster")
            self.cluster_util.reset_cluster(self.targetMaster,
                                            self.reset_services)
            master_services = self.cluster_util.get_services(
                self.servers[:1], self.services_init, start_node=0)
            if master_services is not None:
                master_services = master_services[0].split(",")
            self.quota = self._initialize_nodes(
                self.task, self.cluster.servers,
                self.disabled_consistent_view,
                self.rebalanceIndexWaitingDisabled,
                self.rebalanceIndexPausingDisabled,
                self.maxParallelIndexers,
                self.maxParallelReplicaIndexers,
                self.port, self.quota_percent,
                services=master_services)
            self.cluster_util.change_env_variables()
            self.cluster_util.change_checkpoint_params()
            # Add built-in user
            if not self.skip_init_check_cbserver:
                self.add_built_in_server_user(node=self.cluster.master)
            log.info("done initializing cluster")
        else:
            self.quota = ""
        if self.input.param("log_info", None):
            self.cluster_util.change_log_info()
        if self.input.param("log_location", None):
            self.cluster_util.change_log_location()
        if self.input.param("stat_info", None):
            self.cluster_util.change_stat_info()
        if self.input.param("port_info", None):
            self.cluster_util.change_port_info()
        if self.input.param("port", None):
            self.port = str(self.input.param("port", None))
        try:
            if (str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1) or \
                    (str(self.__class__).find('memorysanitytests.MemorySanity') != -1) or \
                    str(self.__class__).find('negativetests.NegativeTests') != -1 or \
                    str(self.__class__).find('warmuptest.WarmUpTests') != -1 or \
                    str(self.__class__).find('failover.failovertests.FailoverTests') != -1 or \
                    str(self.__class__).find('observe.observeseqnotests.ObserveSeqNoTests') != -1 or \
                    str(self.__class__).find('epengine.lwwepengine.LWW_EP_Engine') != -1:
                self.services = self.get_services(self.servers,
                                                  self.services_init)
                # rebalance all nodes into the cluster before each test
                self.task.rebalance(self.servers[:self.num_servers],
                                    self.servers[1:self.num_servers],
                                    [], services=self.services)
            elif self.nodes_init > 1 and not self.skip_init_check_cbserver:
                self.services = self.get_services(
                    self.servers[:self.nodes_init], self.services_init)
                self.task.rebalance(self.servers[:1],
                                    self.servers[1:self.nodes_init],
                                    [], services=self.services)
            elif str(self.__class__).find('ViewQueryTests') != -1 and \
                    not self.input.param("skip_rebalance", False):
                self.services = self.get_services(self.servers,
                                                  self.services_init)
                self.task.rebalance(self.servers, self.servers[1:],
                                    [], services=self.services)
            self.cluster_util.setDebugLevel(service_type="index")
        except BaseException, e:
            # increase case_number to retry tearDown in setup
            # for the next test
            self.case_number += 1000
            self.fail(e)
        log.info("============== basetestcase setup was finished for test #{0} {1} ==============" \
                 .format(self.case_number, self._testMethodName))
        if not self.skip_init_check_cbserver:
            self._log_start(self)
            self.sleep(10)
    except Exception, e:
        traceback.print_exc()
        self.fail(e)
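# The pre-test rebalance above selects its behavior by substring-matching
# the test class name. Below is a minimal sketch of the same dispatch as an
# explicit lookup; the tuple entries are the markers from the code above,
# while the function name and `test` parameter are illustrative.
FULL_REBALANCE_CLASSES = (
    'rebalanceout.RebalanceOutTests',
    'memorysanitytests.MemorySanity',
    'negativetests.NegativeTests',
    'warmuptest.WarmUpTests',
    'failover.failovertests.FailoverTests',
    'observe.observeseqnotests.ObserveSeqNoTests',
    'epengine.lwwepengine.LWW_EP_Engine',
)

def needs_full_rebalance(test):
    # True when every node should be rebalanced in before the test runs
    cls_name = str(test.__class__)
    return any(marker in cls_name for marker in FULL_REBALANCE_CLASSES)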