def setUp(self, add_default_cbas_node=True):
    self.log = logger.Logger.get_logger()
    if self._testMethodDoc:
        self.log.info("\n\nStarting Test: %s \n%s"
                      % (self._testMethodName, self._testMethodDoc))
    else:
        self.log.info("\n\nStarting Test: %s" % (self._testMethodName))
    super(CBASBaseTest, self).setUp()
    self.cbas_node = self.input.cbas
    self.cbas_servers = []
    self.kv_servers = []
    for server in self.servers:
        if "cbas" in server.services:
            self.cbas_servers.append(server)
        if "kv" in server.services:
            self.kv_servers.append(server)
        rest = RestConnection(server)
        rest.set_data_path(data_path=server.data_path,
                           index_path=server.index_path,
                           cbas_path=server.cbas_path)
    self.analytics_helper = AnalyticsHelper()
    self._cb_cluster = self.cluster
    self.travel_sample_docs_count = 31591
    self.beer_sample_docs_count = 7303
    invalid_ip = '10.111.151.109'
    self.cb_bucket_name = self.input.param('cb_bucket_name', 'travel-sample')
    self.cbas_bucket_name = self.input.param('cbas_bucket_name', 'travel')
    self.cb_bucket_password = self.input.param('cb_bucket_password', None)
    self.expected_error = self.input.param("error", None)
    if self.expected_error:
        self.expected_error = self.expected_error.replace("INVALID_IP",
                                                          invalid_ip)
        self.expected_error = self.expected_error.replace("PORT",
                                                          self.master.port)
    self.cb_server_ip = self.input.param("cb_server_ip", None)
    self.cb_server_ip = self.cb_server_ip.replace('INVALID_IP', invalid_ip) \
        if self.cb_server_ip is not None else None
    self.cbas_dataset_name = self.input.param("cbas_dataset_name", 'travel_ds')
    self.cbas_bucket_name_invalid = self.input.param(
        'cbas_bucket_name_invalid', self.cbas_bucket_name)
    self.cbas_dataset2_name = self.input.param('cbas_dataset2_name', None)
    self.skip_create_dataset = self.input.param('skip_create_dataset', False)
    self.disconnect_if_connected = self.input.param('disconnect_if_connected',
                                                    False)
    self.cbas_dataset_name_invalid = self.input.param(
        'cbas_dataset_name_invalid', self.cbas_dataset_name)
    self.skip_drop_connection = self.input.param('skip_drop_connection', False)
    self.skip_drop_dataset = self.input.param('skip_drop_dataset', False)
    self.query_id = self.input.param('query_id', None)
    self.mode = self.input.param('mode', None)
    self.num_concurrent_queries = self.input.param('num_queries', 5000)
    self.concurrent_batch_size = self.input.param('concurrent_batch_size', 100)
    self.compiler_param = self.input.param('compiler_param', None)
    self.compiler_param_val = self.input.param('compiler_param_val', None)
    self.expect_reject = self.input.param('expect_reject', False)
    self.expect_failure = self.input.param('expect_failure', False)
    self.index_name = self.input.param('index_name', "NoName")
    self.index_fields = self.input.param('index_fields', None)
    if self.index_fields:
        self.index_fields = self.index_fields.split("-")
    self.otpNodes = []
    # note: this picks up the cbas_path of the last server in the loop above
    self.cbas_path = server.cbas_path
    self.rest = RestConnection(self.master)
    self.log.info("Setting the min possible memory quota so that adding "
                  "more nodes to the cluster wouldn't be a problem.")
    self.rest.set_service_memoryQuota(service='memoryQuota',
                                      memoryQuota=MIN_KV_QUOTA)
    self.rest.set_service_memoryQuota(service='ftsMemoryQuota',
                                      memoryQuota=FTS_QUOTA)
    self.rest.set_service_memoryQuota(service='indexMemoryQuota',
                                      memoryQuota=INDEX_QUOTA)
    self.set_cbas_memory_from_available_free_memory = self.input.param(
        'set_cbas_memory_from_available_free_memory', False)
    if self.set_cbas_memory_from_available_free_memory:
        info = self.rest.get_nodes_self()
        self.cbas_memory_quota = int((info.memoryFree // 1024 ** 2) * 0.9)
        self.log.info("Setting %d memory quota for CBAS"
                      % self.cbas_memory_quota)
        self.rest.set_service_memoryQuota(
            service='cbasMemoryQuota',
            memoryQuota=self.cbas_memory_quota)
    else:
        self.log.info("Setting %d memory quota for CBAS" % CBAS_QUOTA)
        self.cbas_memory_quota = CBAS_QUOTA
        self.rest.set_service_memoryQuota(service='cbasMemoryQuota',
                                          memoryQuota=CBAS_QUOTA)
    self.cbas_util = None
    # Drop any existing buckets and datasets
    if self.cbas_node:
        self.cbas_util = cbas_utils(self.master, self.cbas_node)
        self.cleanup_cbas()
    if not self.cbas_node and len(self.cbas_servers) >= 1:
        self.cbas_node = self.cbas_servers[0]
        self.cbas_util = cbas_utils(self.master, self.cbas_node)
        if "cbas" in self.master.services:
            self.cleanup_cbas()
    if add_default_cbas_node:
        if self.master.ip != self.cbas_node.ip:
            self.otpNodes.append(
                cluster_utils(self.master).add_node(self.cbas_node))
        else:
            self.otpNodes = self.rest.node_statuses()
            ''' This cbas cleanup is actually not needed. When a node is
                added to the cluster, it is automatically cleaned up. '''
            self.cleanup_cbas()
        self.cbas_servers.remove(self.cbas_node)
    self.log.info("============== CBAS_BASE setup was finished for test #{0} {1} =============="
                  .format(self.case_number, self._testMethodName))
    # NOTE: the 'and False' guard below intentionally keeps this
    # Java-runtime validation disabled
    if add_default_cbas_node and False:
        self.log.info("************************* Validate Java runtime *************************")
        analytics_node = []
        analytics_node.extend(self.cbas_servers)
        analytics_node.append(self.cbas_node)
        for server in analytics_node:
            self.log.info('Validating java runtime info for: ' + server.ip)
            util = cbas_utils(self.master, server)
            diag_res = util.get_analytics_diagnostics(self.cbas_node)
            java_home = diag_res['runtime']['systemProperties']['java.home']
            self.log.info('Java Home: ' + java_home)
            java_runtime_name = \
                diag_res['runtime']['systemProperties']['java.runtime.name']
            self.log.info('Java runtime: ' + java_runtime_name)
            java_runtime_version = \
                diag_res['runtime']['systemProperties']['java.runtime.version']
            self.log.info('Java runtime version: ' + java_runtime_version)
            jre_info = JAVA_RUN_TIMES[self.jre_path]
            self.assertTrue(jre_info['java_home'] in java_home,
                            msg='Incorrect java home value')
            self.assertEqual(java_runtime_name,
                             jre_info['java_runtime_name'],
                             msg='Incorrect java runtime name')
            self.assertTrue(java_runtime_version.startswith(
                jre_info['java_runtime_version']),
                msg='Incorrect java runtime version')
            util.closeConn()
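# A minimal sketch (values assumed, not taken from a live node) of the
# free-memory-based CBAS quota computed above: get_nodes_self() reports
# memoryFree in bytes, `// 1024 ** 2` converts that to MB, and the 0.9
# factor leaves roughly 10% headroom for the other services:
#
#     memory_free = 8 * 1024 ** 3                         # assume 8 GB free
#     cbas_quota = int((memory_free // 1024 ** 2) * 0.9)  # -> 7372 MB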
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.primary_index_created = False
    self.use_sdk_client = self.input.param("use_sdk_client", False)
    if self.input.param("log_level", None):
        log.setLevel(level=0)
        for hd in log.handlers:
            if str(hd.__class__).find('FileHandler') != -1:
                hd.setLevel(level=logging.DEBUG)
            else:
                hd.setLevel(level=getattr(
                    logging, self.input.param("log_level", None)))
    self.servers = self.input.servers
    self.buckets = []
    self.case_number = self.input.param("case_number", 0)
    self.thread_to_use = self.input.param("threads_to_use", 10)
    self.cluster = CBCluster(servers=self.input.servers)
    self.task_manager = TaskManager(self.thread_to_use)
    self.cluster_util = cluster_utils(self.cluster, self.task_manager)
    self.bucket_util = bucket_utils(self.cluster, self.task_manager,
                                    self.cluster_util)
    self.task = ServerTasks(self.task_manager)
    self.cleanup = False
    self.nonroot = False
    shell = RemoteMachineShellConnection(self.cluster.master)
    self.os_info = shell.extract_remote_info().type.lower()
    if self.os_info != 'windows':
        if self.cluster.master.ssh_username != "root":
            self.nonroot = True
    shell.disconnect()
    """ some tests need to bypass checking cb server at setup
        to run installation """
    self.skip_init_check_cbserver = self.input.param(
        "skip_init_check_cbserver", False)
    try:
        self.vbuckets = self.input.param("vbuckets", 1024)
        self.skip_setup_cleanup = self.input.param("skip_setup_cleanup", False)
        self.index_quota_percent = self.input.param("index_quota_percent",
                                                    None)
        self.num_servers = self.input.param("servers",
                                            len(self.cluster.servers))
        self.services_init = self.input.param("services_init", None)
        self.nodes_init = self.input.param("nodes_init", 1)
        self.nodes_in = self.input.param("nodes_in", 1)
        self.nodes_out = self.input.param("nodes_out", 1)
        self.services_in = self.input.param("services_in", None)
        self.forceEject = self.input.param("forceEject", False)
        # initial number of items in the cluster
        self.num_items = self.input.param("num_items", 100000)
        self.num_replicas = self.input.param("replicas", 1)
        self.value_size = self.input.param("value_size", 1)
        self.wait_timeout = self.input.param("wait_timeout", 60)
        self.dgm_run = self.input.param("dgm_run", False)
        self.active_resident_threshold = int(
            self.input.param("active_resident_threshold", 0))
        self.verify_unacked_bytes = self.input.param("verify_unacked_bytes",
                                                     False)
        self.force_kill_memcached = TestInputSingleton.input.param(
            'force_kill_memcached', False)
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.rebalanceIndexWaitingDisabled = self.input.param(
            "rebalanceIndexWaitingDisabled", None)
        self.rebalanceIndexPausingDisabled = self.input.param(
            "rebalanceIndexPausingDisabled", None)
        self.maxParallelIndexers = self.input.param(
            "maxParallelIndexers", None)
        self.maxParallelReplicaIndexers = self.input.param(
            "maxParallelReplicaIndexers", None)
        self.quota_percent = self.input.param("quota_percent", None)
        self.port = None
        self.log_info = self.input.param("log_info", None)
        self.log_location = self.input.param("log_location", None)
        self.stat_info = self.input.param("stat_info", None)
        self.port_info = self.input.param("port_info", None)
        if not hasattr(self, 'skip_buckets_handle'):
            self.skip_buckets_handle = self.input.param(
                "skip_buckets_handle", False)
        # kill a hung test and jump to the next one
        self.test_timeout = self.input.param("test_timeout", 3600)
        self.gsi_type = self.input.param("gsi_type", 'plasma')
        self.compression_mode = self.input.param("compression_mode",
                                                 'passive')
        self.sdk_compression = self.input.param("sdk_compression", True)
        self.replicate_to = self.input.param("replicate_to", 0)
        self.persist_to = self.input.param("persist_to", 0)
        # jre-path for cbas
        self.jre_path = self.input.param("jre_path", None)
        # end of bucket parameters spot (this is ongoing)
        if self.skip_setup_cleanup:
            self.buckets = self.bucket_util.get_all_buckets()
            return
        if not self.skip_init_check_cbserver:
            self.cb_version = None
            if RestHelper(RestConnection(
                    self.cluster.master)).is_ns_server_running():
                """ every new couchbase version brings features that test
                    code cannot exercise on previous releases, so fetch the
                    couchbase version to filter out those tests """
                self.cb_version = RestConnection(
                    self.cluster.master).get_nodes_version()
            else:
                log.info("couchbase server does not run yet")
            self.protocol = self.cluster_util.get_protocol_type()
        self.services_map = None
        log.info("============== basetestcase setup was started for test #{0} {1}=============="
                 .format(self.case_number, self._testMethodName))
        if not self.skip_buckets_handle and not self.skip_init_check_cbserver:
            self.cluster_util.cluster_cleanup(self.bucket_util)
        # avoid any cluster operations in setup for new upgrade
        # & upgradeXDCR tests
        if str(self.__class__).find('newupgradetests') != -1 or \
                str(self.__class__).find('upgradeXDCR') != -1 or \
                str(self.__class__).find('Upgrade_EpTests') != -1 or \
                (hasattr(self, 'skip_buckets_handle') and
                 self.skip_buckets_handle):
            log.info("any cluster operation in setup will be skipped")
            self.primary_index_created = True
            log.info("============== basetestcase setup was finished for test #{0} {1} =============="
                     .format(self.case_number, self._testMethodName))
            return
        # avoid clean-up if the previous test has already been torn down;
        # a case_number above 1000 marks a failed tearDown that is retried here
        if self.case_number == 1 or self.case_number > 1000:
            if self.case_number > 1000:
                log.warn("tearDown for previous test failed. will retry..")
                self.case_number -= 1000
            self.cleanup = True
            if not self.skip_init_check_cbserver:
                self.tearDownEverything()
                self.task = ServerTasks(self.task_manager)
        if not self.skip_init_check_cbserver:
            log.info("initializing cluster")
            # self.cluster_util.reset_cluster()
            master_services = self.cluster_util.get_services(
                self.servers[:1], self.services_init, start_node=0)
            if master_services is not None:
                master_services = master_services[0].split(",")
            self.quota = self._initialize_nodes(
                self.task, self.cluster.servers,
                self.disabled_consistent_view,
                self.rebalanceIndexWaitingDisabled,
                self.rebalanceIndexPausingDisabled,
                self.maxParallelIndexers,
                self.maxParallelReplicaIndexers,
                self.port, self.quota_percent,
                services=master_services)
            self.cluster_util.change_env_variables()
            self.cluster_util.change_checkpoint_params()
            log.info("done initializing cluster")
        else:
            self.quota = ""
        if self.input.param("log_info", None):
            self.cluster_util.change_log_info()
        if self.input.param("log_location", None):
            self.cluster_util.change_log_location()
        if self.input.param("stat_info", None):
            self.cluster_util.change_stat_info()
        if self.input.param("port_info", None):
            self.cluster_util.change_port_info()
        if self.input.param("port", None):
            self.port = str(self.input.param("port", None))
        log.info("============== basetestcase setup was finished for test #{0} {1} =============="
                 .format(self.case_number, self._testMethodName))
        if not self.skip_init_check_cbserver:
            self._log_start()
        self.sleep(5)
    except Exception as e:
        traceback.print_exc()
        self.task.shutdown(force=True)
        self.fail(e)
def setUp(self):
    self.failover_util = failover_utils()
    self.node_util = node_utils()
    self.views_util = views_utils()
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.primary_index_created = False
    self.use_sdk_client = self.input.param("use_sdk_client", False)
    self.analytics = self.input.param("analytics", False)
    if self.input.param("log_level", None):
        log.setLevel(level=0)
        for hd in log.handlers:
            if str(hd.__class__).find('FileHandler') != -1:
                hd.setLevel(level=logging.DEBUG)
            else:
                hd.setLevel(level=getattr(
                    logging, self.input.param("log_level", None)))
    self.servers = self.input.servers
    if str(self.__class__).find('moxitests') != -1:
        self.moxi_server = self.input.moxis[0]
        self.servers = [server for server in self.servers
                        if server.ip != self.moxi_server.ip]
    self.buckets = []
    self.bucket_base_params = {}
    self.bucket_base_params['membase'] = {}
    self.master = self.servers[0]
    self.bucket_util = bucket_utils(self.master)
    self.cluster_util = cluster_utils(self.master)
    self.indexManager = self.servers[0]
    if not hasattr(self, 'cluster'):
        self.cluster = Cluster()
    self.pre_warmup_stats = {}
    self.cleanup = False
    self.nonroot = False
    shell = RemoteMachineShellConnection(self.master)
    self.os_info = shell.extract_remote_info().type.lower()
    if self.os_info != 'windows':
        if self.master.ssh_username != "root":
            self.nonroot = True
    shell.disconnect()
    """ some tests need to bypass checking cb server at setup
        to run installation """
    self.skip_init_check_cbserver = \
        self.input.param("skip_init_check_cbserver", False)
    self.data_collector = DataCollector()
    self.data_analyzer = DataAnalyzer()
    self.result_analyzer = DataAnalysisResultAnalyzer()
    # self.set_testrunner_client()
    self.change_bucket_properties = False
    self.cbas_node = self.input.cbas
    self.cbas_servers = []
    self.kv_servers = []
    self.otpNodes = []
    for server in self.servers:
        if "cbas" in server.services:
            self.cbas_servers.append(server)
        if "kv" in server.services:
            self.kv_servers.append(server)
    if not self.cbas_node and len(self.cbas_servers) >= 1:
        self.cbas_node = self.cbas_servers[0]
    try:
        self.skip_setup_cleanup = self.input.param("skip_setup_cleanup", False)
        self.vbuckets = self.input.param("vbuckets", 1024)
        self.upr = self.input.param("upr", None)
        self.index_quota_percent = self.input.param("index_quota_percent",
                                                    None)
        self.targetIndexManager = self.input.param("targetIndexManager",
                                                   False)
        self.targetMaster = self.input.param("targetMaster", False)
        self.reset_services = self.input.param("reset_services", False)
        self.auth_mech = self.input.param("auth_mech", "PLAIN")
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # number of the case performed from testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        self.parallelism = self.input.param("parallelism", False)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.num_buckets = self.input.param("num_buckets", 0)
        self.verify_unacked_bytes = self.input.param("verify_unacked_bytes",
                                                     False)
        self.memcached_buckets = self.input.param("memcached_buckets", 0)
        self.enable_flow_control = self.input.param("enable_flow_control",
                                                    False)
        self.total_buckets = self.sasl_buckets + self.default_bucket + \
            self.standard_buckets + self.memcached_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.nodes_init = self.input.param("nodes_init", 1)
        self.nodes_in = self.input.param("nodes_in", 1)
        self.nodes_out = self.input.param("nodes_out", 1)
        self.services_init = self.input.param("services_init", None)
        self.services_in = self.input.param("services_in", None)
        self.forceEject = self.input.param("forceEject", False)
        self.force_kill_memcached = TestInputSingleton.input.param(
            'force_kill_memcached', False)
        # initial number of items in the cluster
        self.num_items = self.input.param("items", 1000)
        self.value_size = self.input.param("value_size", 512)
        self.dgm_run = self.input.param("dgm_run", False)
        self.active_resident_threshold = int(
            self.input.param("active_resident_threshold", 0))
        # max number of items to verify in ValidateDataTask; None verifies all
        self.max_verify = self.input.param("max_verify", None)
        # we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.rebalanceIndexWaitingDisabled = self.input.param(
            "rebalanceIndexWaitingDisabled", None)
        self.rebalanceIndexPausingDisabled = self.input.param(
            "rebalanceIndexPausingDisabled", None)
        self.maxParallelIndexers = self.input.param(
            "maxParallelIndexers", None)
        self.maxParallelReplicaIndexers = self.input.param(
            "maxParallelReplicaIndexers", None)
        self.quota_percent = self.input.param("quota_percent", None)
        self.port = None
        self.log_message = self.input.param("log_message", None)
        self.log_info = self.input.param("log_info", None)
        self.log_location = self.input.param("log_location", None)
        self.stat_info = self.input.param("stat_info", None)
        self.port_info = self.input.param("port_info", None)
        if not hasattr(self, 'skip_buckets_handle'):
            self.skip_buckets_handle = self.input.param(
                "skip_buckets_handle", False)
        self.nodes_out_dist = self.input.param("nodes_out_dist", None)
        self.absolute_path = self.input.param("absolute_path", True)
        # kill a hung test and jump to the next one
        self.test_timeout = self.input.param("test_timeout", 3600)
        self.enable_bloom_filter = self.input.param("enable_bloom_filter",
                                                    False)
        self.enable_time_sync = self.input.param("enable_time_sync", False)
        self.gsi_type = self.input.param("gsi_type", 'plasma')
        # bucket parameters go here
        self.bucket_size = self.input.param("bucket_size", None)
        self.bucket_type = self.input.param("bucket_type", 'membase')
        self.num_replicas = self.input.param("replicas", 1)
        self.enable_replica_index = self.input.param("index_replicas", 1)
        self.eviction_policy = self.input.param(
            "eviction_policy", 'valueOnly')  # or 'fullEviction'
        # an ephemeral bucket supports noEviction or nruEviction
        if self.bucket_type == 'ephemeral' and \
                self.eviction_policy == 'valueOnly':
            # use the ephemeral bucket default
            self.eviction_policy = 'noEviction'
        self.sasl_password = self.input.param("sasl_password", 'password')
        # only applies to LWW, but lives here because the bucket is created here
        self.lww = self.input.param("lww", False)
        self.maxttl = self.input.param("maxttl", None)
        self.compression_mode = self.input.param("compression_mode",
                                                 'passive')
        self.sdk_compression = self.input.param("sdk_compression", True)
        self.sasl_bucket_name = "bucket"
        self.sasl_bucket_priority = self.input.param("sasl_bucket_priority",
                                                     None)
        self.standard_bucket_priority = self.input.param(
            "standard_bucket_priority", None)
        # end of bucket parameters spot (this is ongoing)
        if self.skip_setup_cleanup:
            self.buckets = BucketHelper(self.master).get_buckets()
            return
        if not self.skip_init_check_cbserver:
            self.cb_version = None
            if RestHelper(RestConnection(self.master)).is_ns_server_running():
                """ every new couchbase version brings features that test
                    code cannot exercise on previous releases, so fetch the
                    couchbase version to filter out those tests """
                self.cb_version = RestConnection(
                    self.master).get_nodes_version()
            else:
                log.info("couchbase server does not run yet")
            self.protocol = self.get_protocol_type()
        self.services_map = None
        if self.sasl_bucket_priority is not None:
            self.sasl_bucket_priority = self.sasl_bucket_priority.split(":")
        if self.standard_bucket_priority is not None:
            self.standard_bucket_priority = \
                self.standard_bucket_priority.split(":")
        log.info("============== basetestcase setup was started for test #{0} {1}=============="
                 .format(self.case_number, self._testMethodName))
        if not self.skip_buckets_handle and not self.skip_init_check_cbserver:
            self._cluster_cleanup()
        shared_params = self._create_bucket_params(
            server=self.master,
            size=self.bucket_size,
            replicas=self.num_replicas,
            enable_replica_index=self.enable_replica_index,
            eviction_policy=self.eviction_policy,
            bucket_priority=None,
            lww=self.lww,
            maxttl=self.maxttl,
            compression_mode=self.compression_mode)
        membase_params = copy.deepcopy(shared_params)
        membase_params['bucket_type'] = 'membase'
        self.bucket_base_params['membase']['non_ephemeral'] = membase_params
        membase_ephemeral_params = copy.deepcopy(shared_params)
        membase_ephemeral_params['bucket_type'] = 'ephemeral'
        self.bucket_base_params['membase']['ephemeral'] = \
            membase_ephemeral_params
        memcached_params = copy.deepcopy(shared_params)
        memcached_params['bucket_type'] = 'memcached'
        self.bucket_base_params['memcached'] = memcached_params
        # avoid any cluster operations in setup for new upgrade
        # & upgradeXDCR tests
        if str(self.__class__).find('newupgradetests') != -1 or \
                str(self.__class__).find('upgradeXDCR') != -1 or \
                str(self.__class__).find('Upgrade_EpTests') != -1 or \
                (hasattr(self, 'skip_buckets_handle') and
                 self.skip_buckets_handle):
            log.info("any cluster operation in setup will be skipped")
            self.primary_index_created = True
            log.info("============== basetestcase setup was finished for test #{0} {1} =============="
                     .format(self.case_number, self._testMethodName))
            return
        # avoid clean-up if the previous test has already been torn down
        if self.case_number == 1 or self.case_number > 1000:
            if self.case_number > 1000:
                log.warn("tearDown for previous test failed. will retry..")
                self.case_number -= 1000
            self.cleanup = True
            if not self.skip_init_check_cbserver:
                self.tearDownEverything()
                self.cluster = Cluster()
        if not self.skip_init_check_cbserver:
            log.info("initializing cluster")
            self.reset_cluster()
            master_services = self.get_services(self.servers[:1],
                                                self.services_init,
                                                start_node=0)
            if master_services is not None:
                master_services = master_services[0].split(",")
            self.quota = self._initialize_nodes(
                self.cluster, self.servers,
                self.disabled_consistent_view,
                self.rebalanceIndexWaitingDisabled,
                self.rebalanceIndexPausingDisabled,
                self.maxParallelIndexers,
                self.maxParallelReplicaIndexers,
                self.port, self.quota_percent,
                services=master_services)
            self.change_env_variables()
            self.change_checkpoint_params()
            # Add built-in user
            if not self.skip_init_check_cbserver:
                self.add_built_in_server_user(node=self.master)
            log.info("done initializing cluster")
        else:
            self.quota = ""
        if self.input.param("log_info", None):
            self.change_log_info()
        if self.input.param("log_location", None):
            self.change_log_location()
        if self.input.param("stat_info", None):
            self.change_stat_info()
        if self.input.param("port_info", None):
            self.change_port_info()
        if self.input.param("port", None):
            self.port = str(self.input.param("port", None))
        try:
            if str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1 or \
                    str(self.__class__).find('memorysanitytests.MemorySanity') != -1 or \
                    str(self.__class__).find('negativetests.NegativeTests') != -1 or \
                    str(self.__class__).find('warmuptest.WarmUpTests') != -1 or \
                    str(self.__class__).find('failover.failovertests.FailoverTests') != -1 or \
                    str(self.__class__).find('observe.observeseqnotests.ObserveSeqNoTests') != -1 or \
                    str(self.__class__).find('epengine.lwwepengine.LWW_EP_Engine') != -1:
                self.services = self.get_services(self.servers,
                                                  self.services_init)
                # rebalance all nodes into the cluster before each test
                self.cluster.rebalance(self.servers[:self.num_servers],
                                       self.servers[1:self.num_servers],
                                       [], services=self.services)
            elif self.nodes_init > 1 and not self.skip_init_check_cbserver:
                self.services = self.get_services(
                    self.servers[:self.nodes_init], self.services_init)
                self.cluster.rebalance(self.servers[:1],
                                       self.servers[1:self.nodes_init],
                                       [], services=self.services)
            elif str(self.__class__).find('ViewQueryTests') != -1 and \
                    not self.input.param("skip_rebalance", False):
                self.services = self.get_services(self.servers,
                                                  self.services_init)
                self.cluster.rebalance(self.servers, self.servers[1:],
                                       [], services=self.services)
                self.setDebugLevel(service_type="index")
        except BaseException as e:
            # increase case_number to retry tearDown in setup for the next test
            self.case_number += 1000
            self.fail(e)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 10:
            log.info("================== changing max buckets from 10 to {0} ================="
                     .format(self.total_buckets))
            self.change_max_buckets(self, self.total_buckets)
        if self.total_buckets > 0 and not self.skip_init_check_cbserver:
            """ since sherlock, the index service can take some RAM quota
                out of the total couchbase server quota, so fetch the RAM
                actually available before creating bucket(s) """
            node_info = RestConnection(self.master).get_nodes_self()
            if node_info.memoryQuota and int(node_info.memoryQuota) > 0:
                ram_available = node_info.memoryQuota
            else:
                ram_available = self.quota
            if self.bucket_size is None:
                if self.dgm_run:
                    # if dgm is set, set the bucket size to the dgm setting
                    self.bucket_size = self.quota
                else:
                    self.bucket_size = self._get_bucket_size(
                        ram_available, self.total_buckets)
        self.bucket_base_params['membase']['non_ephemeral']['size'] = \
            self.bucket_size
        self.bucket_base_params['membase']['ephemeral']['size'] = \
            self.bucket_size
        self.bucket_base_params['memcached']['size'] = self.bucket_size
        if str(self.__class__).find('upgrade_tests') == -1 and \
                str(self.__class__).find('newupgradetests') == -1:
            self._bucket_creation()
        log.info("============== basetestcase setup was finished for test #{0} {1} =============="
                 .format(self.case_number, self._testMethodName))
        if not self.skip_init_check_cbserver:
            self._log_start(self)
        self.sleep(10)
    except Exception as e:
        # assumed handler: the try-block above otherwise never closes in this
        # excerpt; mirrors the cleanup-and-fail pattern of the setUp above
        traceback.print_exc()
        self.cluster.shutdown(force=True)
        self.fail(e)
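# Illustrative only: _get_bucket_size() is a framework helper not shown in
# this excerpt. Assuming it divides the available RAM quota evenly across
# buckets, a node reporting memoryQuota = 2048 MB with total_buckets = 4
# would yield roughly 512 MB per bucket (the helper may also reserve some
# headroom).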
def setUp(self):
    self.failover_util = failover_utils()
    self.node_util = node_utils()
    self.views_util = views_utils()
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.primary_index_created = False
    self.use_sdk_client = self.input.param("use_sdk_client", False)
    if self.input.param("log_level", None):
        log.setLevel(level=0)
        for hd in log.handlers:
            if str(hd.__class__).find('FileHandler') != -1:
                hd.setLevel(level=logging.DEBUG)
            else:
                hd.setLevel(level=getattr(
                    logging, self.input.param("log_level", None)))
    self.servers = self.input.servers
    if str(self.__class__).find('moxitests') != -1:
        self.moxi_server = self.input.moxis[0]
        self.servers = [server for server in self.servers
                        if server.ip != self.moxi_server.ip]
    self.buckets = []
    self.cluster = CBCluster(servers=self.input.servers)
    self.bucket_util = bucket_utils(self.cluster)
    self.cluster_util = cluster_utils(self.cluster)
    self.task = ServerTasks()
    self.pre_warmup_stats = {}
    self.cleanup = False
    self.nonroot = False
    shell = RemoteMachineShellConnection(self.cluster.master)
    self.os_info = shell.extract_remote_info().type.lower()
    if self.os_info != 'windows':
        if self.cluster.master.ssh_username != "root":
            self.nonroot = True
    shell.disconnect()
    """ some tests need to bypass checking cb server at setup
        to run installation """
    self.skip_init_check_cbserver = \
        self.input.param("skip_init_check_cbserver", False)
    self.data_collector = DataCollector()
    self.data_analyzer = DataAnalyzer()
    self.result_analyzer = DataAnalysisResultAnalyzer()
    try:
        self.skip_setup_cleanup = self.input.param("skip_setup_cleanup", False)
        self.index_quota_percent = self.input.param("index_quota_percent",
                                                    None)
        self.targetIndexManager = self.input.param("targetIndexManager",
                                                   False)
        self.targetMaster = self.input.param("targetMaster", False)
        self.reset_services = self.input.param("reset_services", False)
        self.auth_mech = self.input.param("auth_mech", "PLAIN")
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # number of the case performed from testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        self.parallelism = self.input.param("parallelism", False)
        self.verify_unacked_bytes = self.input.param("verify_unacked_bytes",
                                                     False)
        self.enable_flow_control = self.input.param("enable_flow_control",
                                                    False)
        self.num_servers = self.input.param("servers",
                                            len(self.cluster.servers))
        self.nodes_init = self.input.param("nodes_init", 1)
        self.nodes_in = self.input.param("nodes_in", 1)
        self.nodes_out = self.input.param("nodes_out", 1)
        self.services_init = self.input.param("services_init", None)
        self.services_in = self.input.param("services_in", None)
        self.forceEject = self.input.param("forceEject", False)
        self.force_kill_memcached = TestInputSingleton.input.param(
            'force_kill_memcached', False)
        # initial number of items in the cluster
        self.num_items = self.input.param("items", 1000)
        self.value_size = self.input.param("value_size", 512)
        self.dgm_run = self.input.param("dgm_run", False)
        self.active_resident_threshold = int(
            self.input.param("active_resident_threshold", 0))
        # max number of items to verify in ValidateDataTask; None verifies all
        self.max_verify = self.input.param("max_verify", None)
        # we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.rebalanceIndexWaitingDisabled = self.input.param(
            "rebalanceIndexWaitingDisabled", None)
        self.rebalanceIndexPausingDisabled = self.input.param(
            "rebalanceIndexPausingDisabled", None)
        self.maxParallelIndexers = self.input.param(
            "maxParallelIndexers", None)
        self.maxParallelReplicaIndexers = self.input.param(
            "maxParallelReplicaIndexers", None)
        self.quota_percent = self.input.param("quota_percent", None)
        self.port = None
        self.log_message = self.input.param("log_message", None)
        self.log_info = self.input.param("log_info", None)
        self.log_location = self.input.param("log_location", None)
        self.stat_info = self.input.param("stat_info", None)
        self.port_info = self.input.param("port_info", None)
        if not hasattr(self, 'skip_buckets_handle'):
            self.skip_buckets_handle = self.input.param(
                "skip_buckets_handle", False)
        self.nodes_out_dist = self.input.param("nodes_out_dist", None)
        self.absolute_path = self.input.param("absolute_path", True)
        # kill a hung test and jump to the next one
        self.test_timeout = self.input.param("test_timeout", 3600)
        self.enable_bloom_filter = self.input.param("enable_bloom_filter",
                                                    False)
        # self.enable_time_sync = self.input.param("enable_time_sync", False)
        self.gsi_type = self.input.param("gsi_type", 'plasma')
        # jre-path for cbas
        self.jre_path = self.input.param("jre_path", None)
        # end of bucket parameters spot (this is ongoing)
        if self.skip_setup_cleanup:
            # note: this class tracks the master node via self.cluster.master
            self.buckets = BucketHelper(self.cluster.master).get_buckets()
            return
        if not self.skip_init_check_cbserver:
            self.cb_version = None
            if RestHelper(RestConnection(
                    self.cluster.master)).is_ns_server_running():
                """ every new couchbase version brings features that test
                    code cannot exercise on previous releases, so fetch the
                    couchbase version to filter out those tests """
                self.cb_version = RestConnection(
                    self.cluster.master).get_nodes_version()
            else:
                log.info("couchbase server does not run yet")
            self.protocol = self.cluster_util.get_protocol_type()
        self.services_map = None
        log.info("============== basetestcase setup was started for test #{0} {1}=============="
                 .format(self.case_number, self._testMethodName))
        if not self.skip_buckets_handle and not self.skip_init_check_cbserver:
            self.cluster_util._cluster_cleanup(self.bucket_util)
        # avoid any cluster operations in setup for new upgrade
        # & upgradeXDCR tests
        if str(self.__class__).find('newupgradetests') != -1 or \
                str(self.__class__).find('upgradeXDCR') != -1 or \
                str(self.__class__).find('Upgrade_EpTests') != -1 or \
                (hasattr(self, 'skip_buckets_handle') and
                 self.skip_buckets_handle):
            log.info("any cluster operation in setup will be skipped")
            self.primary_index_created = True
            log.info("============== basetestcase setup was finished for test #{0} {1} =============="
                     .format(self.case_number, self._testMethodName))
            return
        # avoid clean-up if the previous test has already been torn down
        if self.case_number == 1 or self.case_number > 1000:
            if self.case_number > 1000:
                log.warn("tearDown for previous test failed. will retry..")
                self.case_number -= 1000
            self.cleanup = True
            if not self.skip_init_check_cbserver:
                self.tearDownEverything()
                self.task = ServerTasks()
        if not self.skip_init_check_cbserver:
            log.info("initializing cluster")
            self.cluster_util.reset_cluster(self.targetMaster,
                                            self.reset_services)
            master_services = self.cluster_util.get_services(
                self.servers[:1], self.services_init, start_node=0)
            if master_services is not None:
                master_services = master_services[0].split(",")
            self.quota = self._initialize_nodes(
                self.task, self.cluster.servers,
                self.disabled_consistent_view,
                self.rebalanceIndexWaitingDisabled,
                self.rebalanceIndexPausingDisabled,
                self.maxParallelIndexers,
                self.maxParallelReplicaIndexers,
                self.port, self.quota_percent,
                services=master_services)
            self.cluster_util.change_env_variables()
            self.cluster_util.change_checkpoint_params()
            # Add built-in user
            if not self.skip_init_check_cbserver:
                self.add_built_in_server_user(node=self.cluster.master)
            log.info("done initializing cluster")
        else:
            self.quota = ""
        if self.input.param("log_info", None):
            self.cluster_util.change_log_info()
        if self.input.param("log_location", None):
            self.cluster_util.change_log_location()
        if self.input.param("stat_info", None):
            self.cluster_util.change_stat_info()
        if self.input.param("port_info", None):
            self.cluster_util.change_port_info()
        if self.input.param("port", None):
            self.port = str(self.input.param("port", None))
        try:
            if str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1 or \
                    str(self.__class__).find('memorysanitytests.MemorySanity') != -1 or \
                    str(self.__class__).find('negativetests.NegativeTests') != -1 or \
                    str(self.__class__).find('warmuptest.WarmUpTests') != -1 or \
                    str(self.__class__).find('failover.failovertests.FailoverTests') != -1 or \
                    str(self.__class__).find('observe.observeseqnotests.ObserveSeqNoTests') != -1 or \
                    str(self.__class__).find('epengine.lwwepengine.LWW_EP_Engine') != -1:
                self.services = self.get_services(self.servers,
                                                  self.services_init)
                # rebalance all nodes into the cluster before each test
                self.task.rebalance(self.servers[:self.num_servers],
                                    self.servers[1:self.num_servers],
                                    [], services=self.services)
            elif self.nodes_init > 1 and not self.skip_init_check_cbserver:
                self.services = self.get_services(
                    self.servers[:self.nodes_init], self.services_init)
                self.task.rebalance(self.servers[:1],
                                    self.servers[1:self.nodes_init],
                                    [], services=self.services)
            elif str(self.__class__).find('ViewQueryTests') != -1 and \
                    not self.input.param("skip_rebalance", False):
                self.services = self.get_services(self.servers,
                                                  self.services_init)
                self.task.rebalance(self.servers, self.servers[1:],
                                    [], services=self.services)
                self.cluster_util.setDebugLevel(service_type="index")
        except BaseException as e:
            # increase case_number to retry tearDown in setup for the next test
            self.case_number += 1000
            self.fail(e)
        log.info("============== basetestcase setup was finished for test #{0} {1} =============="
                 .format(self.case_number, self._testMethodName))
        if not self.skip_init_check_cbserver:
            self._log_start(self)
        self.sleep(10)
    except Exception as e:
        # assumed handler: the try-block above otherwise never closes in this
        # excerpt; mirrors the cleanup-and-fail pattern of the first setUp
        traceback.print_exc()
        self.task.shutdown(force=True)
        self.fail(e)