def use_cluster(cluster_name, nodes, ipformat=None, start=True):
    """Make ``cluster_name`` the active CCM cluster with the given topology.

    When an external cluster is in use (USE_CASS_EXTERNAL) this is a no-op.
    Otherwise, if the running cluster already matches the requested name and
    topology it is reused; if not, the old cluster is stopped and the
    requested one is loaded from disk (or created from scratch when loading
    fails). Finally the cluster is optionally started and the test keyspace
    is created.

    :param cluster_name: name of the ccm cluster to use.
    :param nodes: topology passed to ``Cluster.populate``.
    :param ipformat: optional IP format forwarded to ccm.
    :param start: when True, start the cluster and set up the keyspace.
    """
    global CCM_CLUSTER
    if USE_CASS_EXTERNAL:
        if CCM_CLUSTER:
            log.debug("Using external CCM cluster {0}".format(CCM_CLUSTER.name))
        else:
            log.debug("Using unnamed external cluster")
        return

    if is_current_cluster(cluster_name, nodes):
        log.debug("Using existing cluster, matching topology: {0}".format(cluster_name))
    else:
        if CCM_CLUSTER:
            log.debug("Stopping existing cluster, topology mismatch: {0}".format(CCM_CLUSTER.name))
            CCM_CLUSTER.stop()
        try:
            # Prefer reusing a cluster already known to ccm on disk.
            CCM_CLUSTER = CCMClusterFactory.load(path, cluster_name)
            log.debug("Found existing CCM cluster, {0}; clearing.".format(cluster_name))
            CCM_CLUSTER.clear()
            CCM_CLUSTER.set_install_dir(**CCM_KWARGS)
        except Exception:
            # Loading failed (typically: no such cluster yet) -- create one.
            ex_type, ex, tb = sys.exc_info()
            # FIX: Logger.warn is a deprecated alias; use warning().
            log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
            del tb
            log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, CCM_KWARGS))
            if DSE_VERSION:
                # FIX: this is an informational message, not an error;
                # was previously logged at ERROR level.
                log.debug("creating dse cluster")
                CCM_CLUSTER = DseCluster(path, cluster_name, **CCM_KWARGS)
            else:
                CCM_CLUSTER = CCMCluster(path, cluster_name, **CCM_KWARGS)
            CCM_CLUSTER.set_configuration_options({'start_native_transport': True})
            if CASSANDRA_VERSION >= '2.2':
                CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
            if CASSANDRA_VERSION >= '3.0':
                CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True})
            common.switch_cluster(path, cluster_name)
            CCM_CLUSTER.populate(nodes, ipformat=ipformat)

    try:
        jvm_args = []
        # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back
        if PROTOCOL_VERSION >= 4:
            jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"]
        if start:
            log.debug("Starting CCM cluster: {0}".format(cluster_name))
            CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args)
            setup_keyspace(ipformat=ipformat)
    except Exception:
        log.exception("Failed to start CCM cluster; removing cluster.")
        if os.name == "nt":
            if CCM_CLUSTER:
                # FIX: itervalues() is Python-2-only; values() works on both.
                for node in CCM_CLUSTER.nodes.values():
                    os.system("taskkill /F /PID " + str(node.pid))
        else:
            call(["pkill", "-9", "-f", ".ccm"])
        remove_cluster()
        raise
def use_cluster(cluster_name, nodes, ipformat=None, start=True):
    """Activate the ccm cluster ``cluster_name`` with topology ``nodes``.

    A running cluster that already matches is reused as-is. Otherwise any
    running cluster is stopped, the requested one is loaded from disk (or
    freshly created when loading fails), populated, optionally started,
    and the test keyspace is created.
    """
    global CCM_CLUSTER

    if is_current_cluster(cluster_name, nodes):
        log.debug("Using existing cluster %s", cluster_name)
        return

    if CCM_CLUSTER:
        log.debug("Stopping cluster %s", CCM_CLUSTER.name)
        CCM_CLUSTER.stop()

    try:
        def _load_or_create():
            # Prefer an on-disk ccm cluster; any load failure means we
            # build a brand new one instead.
            try:
                existing = CCMClusterFactory.load(path, cluster_name)
                log.debug("Found existing ccm %s cluster; clearing", cluster_name)
                existing.clear()
                existing.set_install_dir(**CCM_KWARGS)
                return existing
            except Exception:
                log.debug("Creating new ccm %s cluster with %s", cluster_name, CCM_KWARGS)
                return CCMCluster(path, cluster_name, **CCM_KWARGS)

        ccm = _load_or_create()
        ccm.set_configuration_options({'start_native_transport': True})
        common.switch_cluster(path, cluster_name)
        ccm.populate(nodes, ipformat=ipformat)
        if start:
            log.debug("Starting ccm %s cluster", cluster_name)
            ccm.start(wait_for_binary_proto=True, wait_other_notice=True)
            setup_test_keyspace()
        # Only publish the cluster once it is fully set up.
        CCM_CLUSTER = ccm
    except Exception:
        log.exception("Failed to start ccm cluster:")
        raise
def setUp(self):
    """Per-test setup.

    Records the current test name, kills stray Cassandra processes on
    Windows, cleans up any cluster left over from an interrupted previous
    run, creates this test's cluster, applies configuration, and records
    the cluster in LAST_TEST_DIR for the next run's cleanup.
    """
    global CURRENT_TEST
    CURRENT_TEST = self.id() + self._testMethodName
    # On Windows, forcefully terminate any leftover previously running cassandra processes. This is a temporary
    # workaround until we can determine the cause of intermittent hung-open tests and file-handles.
    if is_win():
        try:
            import psutil
            for proc in psutil.process_iter():
                try:
                    pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline'])
                except psutil.NoSuchProcess:
                    pass
                else:
                    if (pinfo['name'] == 'java.exe' and '-Dcassandra' in pinfo['cmdline']):
                        # FIX: Python 2 `print` statement replaced with the
                        # function form, valid on both Python 2 and 3.
                        print('Found running cassandra process with pid: ' + str(pinfo['pid']) + '. Killing.')
                        psutil.Process(pinfo['pid']).kill()
        except ImportError:
            debug("WARN: psutil not installed. Cannot detect and kill running cassandra processes - you may see cascading dtest failures.")
    # cleaning up if a previous execution didn't trigger tearDown (which
    # can happen if it is interrupted by KeyboardInterrupt)
    # TODO: move that part to a generic fixture
    if os.path.exists(LAST_TEST_DIR):
        with open(LAST_TEST_DIR) as f:
            self.test_path = f.readline().strip('\n')
            name = f.readline()
            try:
                self.cluster = ClusterFactory.load(self.test_path, name)
                # Avoid waiting too long for node to be marked down
                if not self._preserve_cluster:
                    self._cleanup_cluster()
            except IOError:
                # after a restart, /tmp will be emptied so we'll get an IOError when loading the old cluster here
                pass
    self.cluster = self._get_cluster()
    if ENABLE_ACTIVE_LOG_WATCHING:
        if not self.allow_log_errors:
            self.begin_active_log_watch()
    if RECORD_COVERAGE:
        self.__setup_jacoco()
    try:
        self.init_config()
    except NotImplementedError:
        debug("Custom init_config not found. Setting defaults.")
        self.init_default_config()
    # Remember this cluster so a future interrupted run can clean it up.
    with open(LAST_TEST_DIR, 'w') as f:
        f.write(self.test_path + '\n')
        f.write(self.cluster.name)
    self.modify_log(self.cluster)
    self.connections = []
    self.runners = []
    self.maxDiff = None
def _load_current_cluster(self):
    """Return the currently active ccm cluster, exiting the process on failure."""
    active = common.current_cluster_name(self.path)
    if active is None:
        print_('No currently active cluster (use ccm cluster switch)')
        exit(1)
    try:
        return ClusterFactory.load(self.path, active)
    except common.LoadError as err:
        print_(str(err))
        exit(1)
def use_cluster(cluster_name, nodes, ipformat=None, start=True):
    """Make ``cluster_name`` the active CCM cluster with the given topology.

    No-op when an external cluster is in use. Otherwise reuses a running
    matching cluster, or stops the old one and loads/creates the requested
    cluster, then optionally starts it and creates the test keyspace.

    :param cluster_name: name of the ccm cluster to use.
    :param nodes: topology passed to ``Cluster.populate``.
    :param ipformat: optional IP format forwarded to ccm.
    :param start: when True, start the cluster and set up the keyspace.
    """
    global CCM_CLUSTER
    if USE_CASS_EXTERNAL:
        if CCM_CLUSTER:
            log.debug("Using external CCM cluster {0}".format(CCM_CLUSTER.name))
        else:
            log.debug("Using unnamed external cluster")
        return

    if is_current_cluster(cluster_name, nodes):
        log.debug("Using existing cluster, matching topology: {0}".format(cluster_name))
    else:
        if CCM_CLUSTER:
            log.debug("Stopping existing cluster, topology mismatch: {0}".format(CCM_CLUSTER.name))
            CCM_CLUSTER.stop()
        try:
            # Prefer reusing a cluster already known to ccm on disk.
            CCM_CLUSTER = CCMClusterFactory.load(path, cluster_name)
            log.debug("Found existing CCM cluster, {0}; clearing.".format(cluster_name))
            CCM_CLUSTER.clear()
            CCM_CLUSTER.set_install_dir(**CCM_KWARGS)
        except Exception:
            # Loading failed (typically: no such cluster yet) -- create one.
            ex_type, ex, tb = sys.exc_info()
            # FIX: Logger.warn is a deprecated alias; use warning().
            log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
            del tb
            log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, CCM_KWARGS))
            CCM_CLUSTER = CCMCluster(path, cluster_name, **CCM_KWARGS)
            CCM_CLUSTER.set_configuration_options({'start_native_transport': True})
            if CASSANDRA_VERSION >= '2.2':
                CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
            common.switch_cluster(path, cluster_name)
            CCM_CLUSTER.populate(nodes, ipformat=ipformat)

    try:
        jvm_args = []
        # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back
        if PROTOCOL_VERSION >= 4:
            jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"]
        if start:
            log.debug("Starting CCM cluster: {0}".format(cluster_name))
            CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args)
            setup_keyspace(ipformat=ipformat)
    except Exception:
        log.exception("Failed to start CCM cluster; removing cluster.")
        if os.name == "nt":
            if CCM_CLUSTER:
                # FIX: itervalues() is Python-2-only; values() works on both.
                for node in CCM_CLUSTER.nodes.values():
                    os.system("taskkill /F /PID " + str(node.pid))
        else:
            call(["pkill", "-9", "-f", ".ccm"])
        remove_cluster()
        raise
def run(self):
    """Remove either the named cluster or the currently active one."""
    current_marker = os.path.join(self.path, "CURRENT")
    if not self.other_cluster:
        # Remove the current cluster:
        self.cluster.remove()
        os.remove(current_marker)
        return
    # Remove the specified cluster:
    ClusterFactory.load(self.path, self.other_cluster).remove()
    # Remove CURRENT flag if the specified cluster is the current cluster:
    if common.current_cluster_name(self.path) == self.other_cluster:
        os.remove(current_marker)
def run(self):
    """Remove a cluster; drop the CURRENT marker when it was the active one."""
    target = self.other_cluster
    if target:
        # Remove the specified cluster:
        doomed = ClusterFactory.load(self.path, target)
        doomed.remove()
        # Remove CURRENT flag if the specified cluster is the current cluster:
        was_current = (target == common.current_cluster_name(self.path))
        if was_current:
            os.remove(os.path.join(self.path, 'CURRENT'))
    else:
        # Remove the current cluster:
        self.cluster.remove()
        os.remove(os.path.join(self.path, 'CURRENT'))
def maybe_cleanup_cluster_from_last_test_file():
    """Remove any cluster left behind by a previous interrupted run.

    If a prior execution was killed before tearDown ran (e.g. by
    KeyboardInterrupt), LAST_TEST_DIR still names its cluster; load it
    and clean it up here.
    """
    if not os.path.exists(LAST_TEST_DIR):
        return
    with open(LAST_TEST_DIR) as marker:
        test_path = marker.readline().strip('\n')
        name = marker.readline()
        try:
            # Avoid waiting too long for node to be marked down
            cleanup_cluster(ClusterFactory.load(test_path, name), test_path)
        except IOError:
            # after a restart, /tmp will be emptied so we'll get an IOError
            # when loading the old cluster here
            pass
def teardown_package():
    """Package-level teardown: remove the active cluster and all known test clusters.

    When multiple modules are run explicitly, this runs between them, so
    CCM_CLUSTER must be properly cleared for that case.
    """
    remove_cluster()
    for cluster_name in [CLUSTER_NAME, MULTIDC_CLUSTER_NAME]:
        try:
            cluster = CCMClusterFactory.load(path, cluster_name)
            try:
                cluster.remove()
                # FIX: pass format args lazily instead of eager %-formatting,
                # so interpolation is skipped when the level is disabled.
                log.info('Removed cluster: %s', cluster_name)
            except Exception:
                log.exception('Failed to remove cluster: %s', cluster_name)
        except Exception:
            # FIX: Logger.warn is a deprecated alias; use warning().
            log.warning('Did not find cluster: %s', cluster_name)
def setUp(self):
    """Per-test setup: clean up leftovers, build the cluster, apply config.

    Also records the cluster in LAST_TEST_DIR so a future interrupted run
    can find and remove it.
    """
    global CURRENT_TEST
    # Record which test is running for use by module-level helpers.
    CURRENT_TEST = self.id() + self._testMethodName
    # cleaning up if a previous execution didn't trigger tearDown (which
    # can happen if it is interrupted by KeyboardInterrupt)
    # TODO: move that part to a generic fixture
    if os.path.exists(LAST_TEST_DIR):
        with open(LAST_TEST_DIR) as f:
            # First line: test path; second line: cluster name.
            self.test_path = f.readline().strip('\n')
            name = f.readline()
            try:
                self.cluster = ClusterFactory.load(self.test_path, name)
                # Avoid waiting too long for node to be marked down
                if not self._preserve_cluster:
                    self._cleanup_cluster()
            except IOError:
                # after a restart, /tmp will be emptied so we'll get an
                # IOError when loading the old cluster here
                pass
    self.cluster = self._get_cluster()
    if RECORD_COVERAGE:
        self.__setup_jacoco()
    # the failure detector can be quite slow in such tests with quick start/stop
    self.cluster.set_configuration_options(values={'phi_convict_threshold': 5})

    # Either apply the test's own options verbatim, or relax all request
    # timeouts to 10s so slow CI machines don't produce spurious timeouts.
    timeout = 10000
    if self.cluster_options is not None:
        self.cluster.set_configuration_options(values=self.cluster_options)
    else:
        self.cluster.set_configuration_options(values={
            'read_request_timeout_in_ms': timeout,
            'range_request_timeout_in_ms': timeout,
            'write_request_timeout_in_ms': timeout,
            'truncate_request_timeout_in_ms': timeout,
            'request_timeout_in_ms': timeout
        })

    # Remember this cluster for the next run's leftover cleanup.
    with open(LAST_TEST_DIR, 'w') as f:
        f.write(self.test_path + '\n')
        f.write(self.cluster.name)
    if DEBUG:
        self.cluster.set_log_level("DEBUG")
    if TRACE:
        self.cluster.set_log_level("TRACE")
    self.connections = []
    self.runners = []
def use_cluster(cluster_name, nodes, ipformat=None, start=True):
    """Make ``cluster_name`` with topology ``nodes`` the active ccm cluster.

    No-op when an external cluster is in use. A running matching cluster is
    reused; otherwise the old one is stopped and the requested cluster is
    loaded from disk (or created when loading fails), populated, optionally
    started, and the test keyspace is created.

    :param cluster_name: name of the ccm cluster to use.
    :param nodes: topology passed to ``Cluster.populate``.
    :param ipformat: optional IP format forwarded to ccm.
    :param start: when True, start the cluster and set up the keyspace.
    """
    global CCM_CLUSTER
    if USE_CASS_EXTERNAL:
        if CCM_CLUSTER:
            log.debug("Using external ccm cluster %s", CCM_CLUSTER.name)
        else:
            log.debug("Using unnamed external cluster")
        return

    if is_current_cluster(cluster_name, nodes):
        log.debug("Using existing cluster %s", cluster_name)
        return

    if CCM_CLUSTER:
        log.debug("Stopping cluster %s", CCM_CLUSTER.name)
        CCM_CLUSTER.stop()

    try:
        try:
            # Prefer reusing a cluster ccm already knows about on disk.
            cluster = CCMClusterFactory.load(path, cluster_name)
            log.debug("Found existing ccm %s cluster; clearing", cluster_name)
            cluster.clear()
            cluster.set_install_dir(**CCM_KWARGS)
        except Exception:
            # Load failed (typically: no such cluster yet) -- create fresh.
            log.debug("Creating new ccm %s cluster with %s", cluster_name, CCM_KWARGS)
            cluster = CCMCluster(path, cluster_name, **CCM_KWARGS)

        cluster.set_configuration_options({'start_native_transport': True})
        if CASSANDRA_VERSION >= '2.2':
            cluster.set_configuration_options({'enable_user_defined_functions': True})
        common.switch_cluster(path, cluster_name)
        cluster.populate(nodes, ipformat=ipformat)

        jvm_args = []
        # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back
        if PROTOCOL_VERSION >= 4:
            jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"]

        if start:
            log.debug("Starting ccm %s cluster", cluster_name)
            cluster.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args)
            setup_keyspace(ipformat=ipformat)

        # Only publish the cluster once fully set up.
        CCM_CLUSTER = cluster
    except Exception:
        log.exception("Failed to start ccm cluster. Removing cluster.")
        remove_cluster()
        call(["pkill", "-9", "-f", ".ccm"])
        raise
def setUp(self):
    """Per-test setup: reload/clean leftover cluster, build this test's
    cluster, apply configuration, and record it in LAST_TEST_DIR."""
    global CURRENT_TEST
    CURRENT_TEST = self.id() + self._testMethodName
    # cleaning up if a previous execution didn't trigger tearDown (which
    # can happen if it is interrupted by KeyboardInterrupt)
    # TODO: move that part to a generic fixture
    if os.path.exists(LAST_TEST_DIR):
        with open(LAST_TEST_DIR) as f:
            # LAST_TEST_DIR layout: first line test path, second line name.
            self.test_path = f.readline().strip('\n')
            name = f.readline()
            try:
                self.cluster = ClusterFactory.load(self.test_path, name)
                # Avoid waiting too long for node to be marked down
                if not self._preserve_cluster:
                    self._cleanup_cluster()
            except IOError:
                # after a restart, /tmp will be emptied so we'll get an
                # IOError when loading the old cluster here
                pass
    self.cluster = self._get_cluster()
    if RECORD_COVERAGE:
        self.__setup_jacoco()
    # the failure detector can be quite slow in such tests with quick start/stop
    self.cluster.set_configuration_options(values={'phi_convict_threshold': 5})

    # Relax request timeouts to 10s unless the test supplies its own options.
    timeout = 10000
    if self.cluster_options is not None:
        self.cluster.set_configuration_options(values=self.cluster_options)
    else:
        self.cluster.set_configuration_options(values={
            'read_request_timeout_in_ms': timeout,
            'range_request_timeout_in_ms': timeout,
            'write_request_timeout_in_ms': timeout,
            'truncate_request_timeout_in_ms': timeout,
            'request_timeout_in_ms': timeout
        })

    # Record the cluster so an interrupted run can be cleaned up later.
    with open(LAST_TEST_DIR, 'w') as f:
        f.write(self.test_path + '\n')
        f.write(self.cluster.name)
    if DEBUG:
        self.cluster.set_log_level("DEBUG")
    if TRACE:
        self.cluster.set_log_level("TRACE")
    self.connections = []
    self.runners = []
def tearDownClass(cls):
    """Class-level teardown: dispose of the cluster recorded in LAST_TEST_DIR."""
    reset_environment_vars()
    if not os.path.exists(LAST_TEST_DIR):
        return
    with open(LAST_TEST_DIR) as marker:
        test_path = marker.readline().strip('\n')
        name = marker.readline()
        try:
            cluster = ClusterFactory.load(test_path, name)
            # Avoid waiting too long for node to be marked down
            if KEEP_TEST_DIR:
                # Keep data on disk; stop gently only when recording coverage.
                cluster.stop(gently=RECORD_COVERAGE)
            else:
                cluster.remove()
                os.rmdir(test_path)
            os.remove(LAST_TEST_DIR)
        except IOError:
            # after a restart, /tmp will be emptied so we'll get an IOError
            # when loading the old cluster here
            pass
def use_cluster(cluster_name, nodes, ipformat=None, start=True):
    """Make ``cluster_name`` with topology ``nodes`` the active ccm cluster.

    No-op for external clusters; reuses a matching running cluster;
    otherwise stops the old one, loads or creates the requested cluster,
    populates it, and (optionally) starts it and creates the keyspace.

    :param cluster_name: name of the ccm cluster to use.
    :param nodes: topology passed to ``Cluster.populate``.
    :param ipformat: optional IP format forwarded to ccm.
    :param start: when True, start the cluster and set up the keyspace.
    """
    global CCM_CLUSTER
    if USE_CASS_EXTERNAL:
        if CCM_CLUSTER:
            log.debug("Using external ccm cluster %s", CCM_CLUSTER.name)
        else:
            log.debug("Using unnamed external cluster")
        return

    if is_current_cluster(cluster_name, nodes):
        log.debug("Using existing cluster %s", cluster_name)
        return

    if CCM_CLUSTER:
        log.debug("Stopping cluster %s", CCM_CLUSTER.name)
        CCM_CLUSTER.stop()

    try:
        try:
            # Prefer reusing a cluster ccm already knows about on disk.
            cluster = CCMClusterFactory.load(path, cluster_name)
            log.debug("Found existing ccm %s cluster; clearing", cluster_name)
            cluster.clear()
            cluster.set_install_dir(**CCM_KWARGS)
        except Exception:
            # Load failed (typically: no such cluster yet) -- create fresh.
            log.debug("Creating new ccm %s cluster with %s", cluster_name, CCM_KWARGS)
            cluster = CCMCluster(path, cluster_name, **CCM_KWARGS)

        cluster.set_configuration_options({'start_native_transport': True})
        if CASSANDRA_VERSION >= '2.2':
            cluster.set_configuration_options({'enable_user_defined_functions': True})
        common.switch_cluster(path, cluster_name)
        cluster.populate(nodes, ipformat=ipformat)

        if start:
            log.debug("Starting ccm %s cluster", cluster_name)
            cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
            setup_keyspace(ipformat=ipformat)

        # Only publish the cluster once fully set up.
        CCM_CLUSTER = cluster
    except Exception:
        log.exception("Failed to start ccm cluster. Removing cluster.")
        remove_cluster()
        call(["pkill", "-9", "-f", ".ccm"])
        raise
def _start_cluster(self):
    """Load and start the test's ccm cluster, creating it if necessary.

    First tries to reuse the on-disk ccm cluster named ``self.CLUSTER_NAME``
    under ``self.CLUSTER_PATH``; if loading/starting fails for any reason,
    falls back to creating a fresh cluster from ``self.CLUSTER_KWARGS`` with
    ``self.CLUSTER_NODE_COUNT`` nodes. The running cluster is stored on
    ``self.CCM_CLUSTER``.
    """
    try:
        cluster = CCMClusterFactory.load(self.CLUSTER_PATH, self.CLUSTER_NAME)
        logging.debug(
            "Found existing ccm {} cluster; clearing".format(self.CLUSTER_NAME)
        )
        cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
        self.CCM_CLUSTER = cluster
    except Exception:
        # FIX: the original passed "{}"-style placeholders together with
        # lazy logging args; the stdlib logger uses %-style, so the message
        # was never interpolated (and errored at emit time). Use %s.
        logging.debug(
            "Creating new ccm cluster %s with %s",
            self.CLUSTER_NAME,
            self.CLUSTER_KWARGS,
        )
        cluster = CCMCluster(
            self.CLUSTER_PATH, self.CLUSTER_NAME, **self.CLUSTER_KWARGS
        )
        cluster.set_configuration_options({"start_native_transport": True})
        common.switch_cluster(self.CLUSTER_PATH, self.CLUSTER_NAME)
        cluster.populate(self.CLUSTER_NODE_COUNT, ipformat=None)
        cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
        self.CCM_CLUSTER = cluster
def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=[],
                set_keyspace=True, ccm_options=None, configuration_options={},
                dse_cluster=False, dse_options={}, dse_version=None):
    """Make ``cluster_name`` the active CCM (or DSE) cluster.

    Reuses a matching running cluster; otherwise stops the old one and
    loads (or creates) the requested one, applies version-dependent
    configuration, assigns workloads, optionally starts the cluster and
    creates the test keyspace, and returns the cluster object.

    :param cluster_name: name of the ccm cluster to use.
    :param nodes: topology passed to ``Cluster.populate``.
    :param ipformat: optional IP format forwarded to ccm.
    :param start: when True, start the cluster.
    :param workloads: DSE workloads to assign to every node.
    :param set_keyspace: when True (and starting), create the test keyspace.
    :param ccm_options: overrides for ccm install/version options.
    :param configuration_options: extra cassandra.yaml options.
    :param dse_cluster: build a ``DseCluster`` instead of a ``CCMCluster``.
    :param dse_options: extra dse.yaml options (DSE clusters only).
    :param dse_version: DSE version; requires ``dse_cluster=True``.
    :returns: the global ``CCM_CLUSTER``.
    :raises ValueError: if ``dse_version`` is given without ``dse_cluster``.
    """
    if (dse_version and not dse_cluster):
        raise ValueError(
            'specified dse_version {} but not dse_cluster'.format(dse_version))
    set_default_cass_ip()

    if ccm_options is None and dse_cluster:
        ccm_options = {"version": dse_version or DSE_VERSION}
    elif ccm_options is None:
        ccm_options = CCM_KWARGS.copy()

    # NOTE(review): both fall back to ccm_options['version'] when present;
    # dse_version is then re-derived even if the parameter was None.
    cassandra_version = ccm_options.get('version', CCM_VERSION)
    dse_version = ccm_options.get('version', DSE_VERSION)

    if 'version' in ccm_options:
        ccm_options['version'] = ccm_options['version'].base_version

    global CCM_CLUSTER
    if USE_CASS_EXTERNAL:
        if CCM_CLUSTER:
            log.debug("Using external CCM cluster {0}".format(CCM_CLUSTER.name))
        else:
            log.debug("Using unnamed external cluster")
        if set_keyspace and start:
            setup_keyspace(ipformat=ipformat, wait=False)
        return

    if is_current_cluster(cluster_name, nodes):
        log.debug("Using existing cluster, matching topology: {0}".format(cluster_name))
    else:
        if CCM_CLUSTER:
            log.debug("Stopping existing cluster, topology mismatch: {0}".format(CCM_CLUSTER.name))
            CCM_CLUSTER.stop()
        try:
            # Prefer reusing a cluster ccm already knows about on disk.
            CCM_CLUSTER = CCMClusterFactory.load(path, cluster_name)
            log.debug("Found existing CCM cluster, {0}; clearing.".format(cluster_name))
            CCM_CLUSTER.clear()
            CCM_CLUSTER.set_install_dir(**ccm_options)
            CCM_CLUSTER.set_configuration_options(configuration_options)
        except Exception:
            # Load failed (typically: no such cluster yet) -- create fresh.
            ex_type, ex, tb = sys.exc_info()
            log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
            del tb

            # Allow extra creation args from the CCM_ARGS environment.
            ccm_options.update(cmd_line_args_to_dict('CCM_ARGS'))

            log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, ccm_options))
            if dse_cluster:
                CCM_CLUSTER = DseCluster(path, cluster_name, **ccm_options)
                CCM_CLUSTER.set_configuration_options({'start_native_transport': True})
                CCM_CLUSTER.set_configuration_options({'batch_size_warn_threshold_in_kb': 5})
                if dse_version >= Version('5.0'):
                    CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
                    CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True})
                if 'spark' in workloads:
                    config_options = {"initial_spark_worker_resources": 0.1}
                    CCM_CLUSTER.set_dse_configuration_options(config_options)
                common.switch_cluster(path, cluster_name)
                CCM_CLUSTER.set_configuration_options(configuration_options)
                CCM_CLUSTER.populate(nodes, ipformat=ipformat)
                CCM_CLUSTER.set_dse_configuration_options(dse_options)
            else:
                CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options)
                CCM_CLUSTER.set_configuration_options({'start_native_transport': True})
                if cassandra_version >= Version('2.2'):
                    CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
                if cassandra_version >= Version('3.0'):
                    CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True})
                common.switch_cluster(path, cluster_name)
                CCM_CLUSTER.set_configuration_options(configuration_options)
                CCM_CLUSTER.populate(nodes, ipformat=ipformat)

    try:
        jvm_args = []
        # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back
        if 'graph' not in workloads:
            if PROTOCOL_VERSION >= 4:
                jvm_args = [
                    " -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"
                ]
        if (len(workloads) > 0):
            for node in CCM_CLUSTER.nodes.values():
                node.set_workloads(workloads)
        if start:
            log.debug("Starting CCM cluster: {0}".format(cluster_name))
            CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args)
            # Added to wait for slow nodes to start up
            for node in CCM_CLUSTER.nodes.values():
                wait_for_node_socket(node, 120)
            if set_keyspace:
                setup_keyspace(ipformat=ipformat)
    except Exception:
        log.exception("Failed to start CCM cluster; removing cluster.")
        if os.name == "nt":
            if CCM_CLUSTER:
                for node in six.itervalues(CCM_CLUSTER.nodes):
                    os.system("taskkill /F /PID " + str(node.pid))
        else:
            call(["pkill", "-9", "-f", ".ccm"])
        remove_cluster()
        raise
    return CCM_CLUSTER
if '-' in version_string: version_string = version_string[:version_string.index('-')] return tuple([int(p) for p in version_string.split('.')]) USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False)) default_cassandra_version = '2.1.3' if USE_CASS_EXTERNAL: if CCMClusterFactory: # see if the external instance is running in ccm path = common.get_default_path() name = common.current_cluster_name(path) CCM_CLUSTER = CCMClusterFactory.load(common.get_default_path(), name) CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True) # Not sure what's going on, but the server version query # hangs in python3. This appears to be related to running inside of # nosetests, and only for this query that would run while loading the # module. # This is a hack to make it run with default cassandra version for PY3. # Not happy with it, but need to move on for now. if not six.PY3: cass_ver, _ = get_server_versions() default_cassandra_version = '.'.join('%d' % i for i in cass_ver) else: if not os.getenv('CASSANDRA_VERSION'): log.warning("Using default C* version %s because external server cannot be queried" % default_cassandra_version)
def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None,
                set_keyspace=True, ccm_options=None, configuration_options={},
                dse_options={}, use_single_interface=USE_SINGLE_INTERFACE):
    """Make ``cluster_name`` the active CCM (or DSE) cluster and return it.

    DSE mode is selected by the module-level ``DSE_VERSION``. Reuses a
    matching running cluster; otherwise stops the old one and loads (or
    creates) the requested one, applies version-dependent configuration,
    assigns workloads, optionally starts the cluster and creates the test
    keyspace.

    :param cluster_name: name of the ccm cluster to use.
    :param nodes: topology passed to ``Cluster.populate``.
    :param ipformat: optional IP format forwarded to ccm.
    :param start: when True, start the cluster.
    :param workloads: DSE workloads to assign to every node (default none).
    :param set_keyspace: when True (and starting), create the test keyspace.
    :param ccm_options: overrides for ccm install/version options.
    :param configuration_options: extra cassandra.yaml options.
    :param dse_options: extra dse.yaml options (DSE clusters only).
    :param use_single_interface: forwarded to ``Cluster.populate``.
    :returns: the global ``CCM_CLUSTER``.
    """
    dse_cluster = True if DSE_VERSION else False
    if not workloads:
        workloads = []

    if ccm_options is None and DSE_VERSION:
        ccm_options = {"version": CCM_VERSION}
    elif ccm_options is None:
        ccm_options = CCM_KWARGS.copy()

    cassandra_version = ccm_options.get('version', CCM_VERSION)
    dse_version = ccm_options.get('version', DSE_VERSION)

    global CCM_CLUSTER
    if USE_CASS_EXTERNAL:
        if CCM_CLUSTER:
            log.debug("Using external CCM cluster {0}".format(CCM_CLUSTER.name))
        else:
            # Allow pointing at an external ccm cluster via env vars.
            ccm_path = os.getenv("CCM_PATH", None)
            ccm_name = os.getenv("CCM_NAME", None)
            if ccm_path and ccm_name:
                CCM_CLUSTER = CCMClusterFactory.load(ccm_path, ccm_name)
                log.debug("Using external CCM cluster {0}".format(CCM_CLUSTER.name))
            else:
                log.debug("Using unnamed external cluster")
        if set_keyspace and start:
            setup_keyspace(ipformat=ipformat, wait=False)
        return

    if is_current_cluster(cluster_name, nodes, workloads):
        log.debug("Using existing cluster, matching topology: {0}".format(cluster_name))
    else:
        if CCM_CLUSTER:
            log.debug("Stopping existing cluster, topology mismatch: {0}".format(CCM_CLUSTER.name))
            CCM_CLUSTER.stop()
        try:
            # Prefer reusing a cluster ccm already knows about on disk.
            CCM_CLUSTER = CCMClusterFactory.load(path, cluster_name)
            log.debug("Found existing CCM cluster, {0}; clearing.".format(cluster_name))
            CCM_CLUSTER.clear()
            CCM_CLUSTER.set_install_dir(**ccm_options)
            CCM_CLUSTER.set_configuration_options(configuration_options)
            CCM_CLUSTER.set_dse_configuration_options(dse_options)
        except Exception:
            # Load failed (typically: no such cluster yet) -- create fresh.
            ex_type, ex, tb = sys.exc_info()
            log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
            del tb

            # Allow extra creation args from the CCM_ARGS environment.
            ccm_options.update(cmd_line_args_to_dict('CCM_ARGS'))

            log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, ccm_options))

            # Make sure we cleanup old cluster dir if it exists
            cluster_path = os.path.join(path, cluster_name)
            if os.path.exists(cluster_path):
                shutil.rmtree(cluster_path)

            if dse_cluster:
                CCM_CLUSTER = DseCluster(path, cluster_name, **ccm_options)
                CCM_CLUSTER.set_configuration_options({'start_native_transport': True})
                CCM_CLUSTER.set_configuration_options({'batch_size_warn_threshold_in_kb': 5})
                if Version(dse_version) >= Version('5.0'):
                    CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
                    CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True})
                if Version(dse_version) >= Version('5.1'):
                    # For Inet4Address
                    CCM_CLUSTER.set_dse_configuration_options({
                        'graph': {
                            'gremlin_server': {
                                'scriptEngines': {
                                    'gremlin-groovy': {
                                        'config': {
                                            'sandbox_rules': {
                                                'whitelist_packages': ['java.net']
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    })
                if 'spark' in workloads:
                    if Version(dse_version) >= Version('6.8'):
                        config_options = {
                            "resource_manager_options": {
                                "worker_options": {
                                    "cores_total": 0.1,
                                    "memory_total": "64M"
                                }
                            }
                        }
                    else:
                        config_options = {"initial_spark_worker_resources": 0.1}

                    if Version(dse_version) >= Version('6.7'):
                        log.debug("Disabling AlwaysON SQL for a DSE 6.7 Cluster")
                        config_options['alwayson_sql_options'] = {'enabled': False}
                    CCM_CLUSTER.set_dse_configuration_options(config_options)
                common.switch_cluster(path, cluster_name)
                CCM_CLUSTER.set_configuration_options(configuration_options)
                CCM_CLUSTER.populate(nodes, ipformat=ipformat)
                CCM_CLUSTER.set_dse_configuration_options(dse_options)
            else:
                CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options)
                CCM_CLUSTER.set_configuration_options({'start_native_transport': True})
                if Version(cassandra_version) >= Version('2.2'):
                    CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
                if Version(cassandra_version) >= Version('3.0'):
                    CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True})
                if Version(cassandra_version) >= Version('4.0-a'):
                    CCM_CLUSTER.set_configuration_options({
                        'enable_materialized_views': True,
                        'enable_sasi_indexes': True,
                        'enable_transient_replication': True,
                    })
                common.switch_cluster(path, cluster_name)
                CCM_CLUSTER.set_configuration_options(configuration_options)
                CCM_CLUSTER.populate(nodes, ipformat=ipformat, use_single_interface=use_single_interface)

    try:
        jvm_args = []
        # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back
        if 'graph' in workloads:
            jvm_args += ['-Xms1500M', '-Xmx1500M']
        else:
            if PROTOCOL_VERSION >= 4:
                jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"]
        if len(workloads) > 0:
            for node in CCM_CLUSTER.nodes.values():
                node.set_workloads(workloads)
        if start:
            log.debug("Starting CCM cluster: {0}".format(cluster_name))
            CCM_CLUSTER.start(jvm_args=jvm_args, wait_for_binary_proto=True)
            # Added to wait for slow nodes to start up
            log.debug("Cluster started waiting for binary ports")
            for node in CCM_CLUSTER.nodes.values():
                wait_for_node_socket(node, 300)
            log.debug("Binary ports are open")
            if set_keyspace:
                setup_keyspace(ipformat=ipformat)
    except Exception:
        log.exception("Failed to start CCM cluster; removing cluster.")
        if os.name == "nt":
            if CCM_CLUSTER:
                for node in six.itervalues(CCM_CLUSTER.nodes):
                    os.system("taskkill /F /PID " + str(node.pid))
        else:
            call(["pkill", "-9", "-f", ".ccm"])
        remove_cluster()
        raise
    return CCM_CLUSTER
def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None,
                set_keyspace=True, ccm_options=None, configuration_options=None):
    """Make ``cluster_name`` the active CCM cluster and return it.

    Reuses a matching running cluster; otherwise stops the old one and
    loads (or creates) the requested one, applies version-dependent
    configuration, assigns workloads, optionally starts the cluster and
    creates the test keyspace.

    :param cluster_name: name of the ccm cluster to use.
    :param nodes: topology passed to ``Cluster.populate``.
    :param ipformat: optional IP format forwarded to ccm.
    :param start: when True, start the cluster.
    :param workloads: DSE workloads to assign to every node (default none).
    :param set_keyspace: when True (and starting), create the test keyspace.
    :param ccm_options: overrides for ccm install/version options.
    :param configuration_options: extra cassandra.yaml options.
    :returns: the global ``CCM_CLUSTER``.
    """
    # FIX: mutable default arguments ([] / {}) replaced with None sentinels;
    # callers passing their own values are unaffected.
    if workloads is None:
        workloads = []
    if configuration_options is None:
        configuration_options = {}
    set_default_cass_ip()

    if ccm_options is None:
        ccm_options = CCM_KWARGS.copy()
    cassandra_version = ccm_options.get('version', CASSANDRA_VERSION)
    if 'version' in ccm_options:
        ccm_options['version'] = ccm_options['version'].base_version

    global CCM_CLUSTER
    if USE_CASS_EXTERNAL:
        if CCM_CLUSTER:
            log.debug("Using external CCM cluster {0}".format(CCM_CLUSTER.name))
        else:
            log.debug("Using unnamed external cluster")
        if set_keyspace and start:
            setup_keyspace(ipformat=ipformat, wait=False)
        return

    if is_current_cluster(cluster_name, nodes):
        log.debug("Using existing cluster, matching topology: {0}".format(cluster_name))
    else:
        if CCM_CLUSTER:
            log.debug("Stopping existing cluster, topology mismatch: {0}".format(CCM_CLUSTER.name))
            CCM_CLUSTER.stop()
        try:
            # Prefer reusing a cluster ccm already knows about on disk.
            CCM_CLUSTER = CCMClusterFactory.load(path, cluster_name)
            log.debug("Found existing CCM cluster, {0}; clearing.".format(cluster_name))
            CCM_CLUSTER.clear()
            CCM_CLUSTER.set_install_dir(**ccm_options)
            CCM_CLUSTER.set_configuration_options(configuration_options)
        except Exception:
            # Load failed (typically: no such cluster yet) -- create fresh.
            ex_type, ex, tb = sys.exc_info()
            log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
            del tb
            log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, ccm_options))
            CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options)
            CCM_CLUSTER.set_configuration_options({'start_native_transport': True})
            if cassandra_version >= Version('2.2'):
                CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
            if cassandra_version >= Version('3.0'):
                CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True})
            common.switch_cluster(path, cluster_name)
            CCM_CLUSTER.set_configuration_options(configuration_options)
            CCM_CLUSTER.populate(nodes, ipformat=ipformat)

    try:
        jvm_args = []
        # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back
        if 'graph' not in workloads:
            if PROTOCOL_VERSION >= 4:
                jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"]
        # FIX: idiomatic truthiness test instead of `if(len(workloads) > 0):`.
        if workloads:
            for node in CCM_CLUSTER.nodes.values():
                node.set_workloads(workloads)
        if start:
            log.debug("Starting CCM cluster: {0}".format(cluster_name))
            CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args)
            # Added to wait for slow nodes to start up
            for node in CCM_CLUSTER.nodes.values():
                wait_for_node_socket(node, 120)
            if set_keyspace:
                setup_keyspace(ipformat=ipformat)
    except Exception:
        log.exception("Failed to start CCM cluster; removing cluster.")
        if os.name == "nt":
            if CCM_CLUSTER:
                # FIX: use .values() for consistency with the loops above
                # (and Python 3 compatibility without six).
                for node in CCM_CLUSTER.nodes.values():
                    os.system("taskkill /F /PID " + str(node.pid))
        else:
            call(["pkill", "-9", "-f", ".ccm"])
        remove_cluster()
        raise
    return CCM_CLUSTER
def setUp(self):
    """Per-test setup: kill stray Cassandra JVMs on Windows, clean up any
    cluster left over from an interrupted run, then build and configure a
    fresh cluster for this test."""
    global CURRENT_TEST
    CURRENT_TEST = self.id() + self._testMethodName
    # On Windows, forcefully terminate any leftover previously running cassandra processes. This is a temporary
    # workaround until we can determine the cause of intermittent hung-open tests and file-handles.
    if is_win():
        try:
            import psutil
            for proc in psutil.process_iter():
                try:
                    pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline'])
                except psutil.NoSuchProcess:
                    pass
                else:
                    # NOTE(fix): psutil's cmdline is a list of argv strings, so the
                    # original membership test ('-Dcassandra' in pinfo['cmdline'])
                    # only matched an argument that was *exactly* '-Dcassandra';
                    # substring-match each argument instead.
                    if (pinfo['name'] == 'java.exe'
                            and any('-Dcassandra' in arg for arg in pinfo['cmdline'])):
                        # NOTE(fix): print as a function for Python 2/3 compatibility.
                        print('Found running cassandra process with pid: ' + str(pinfo['pid']) + '. Killing.')
                        psutil.Process(pinfo['pid']).kill()
        except ImportError:
            debug("WARN: psutil not installed. Cannot detect and kill running cassandra processes - you may see cascading dtest failures.")
    # cleaning up if a previous execution didn't trigger tearDown (which
    # can happen if it is interrupted by KeyboardInterrupt)
    # TODO: move that part to a generic fixture
    if os.path.exists(LAST_TEST_DIR):
        with open(LAST_TEST_DIR) as f:
            self.test_path = f.readline().strip('\n')
            name = f.readline()
        try:
            self.cluster = ClusterFactory.load(self.test_path, name)
            # Avoid waiting too long for node to be marked down
            if not self._preserve_cluster:
                self._cleanup_cluster()
        except IOError:
            # after a restart, /tmp will be emptied so we'll get an IOError when loading the old cluster here
            pass
    self.cluster = self._get_cluster()
    if RECORD_COVERAGE:
        self.__setup_jacoco()
    # the failure detector can be quite slow in such tests with quick start/stop
    self.cluster.set_configuration_options(values={'phi_convict_threshold': 5})
    timeout = 10000
    if self.cluster_options is not None:
        self.cluster.set_configuration_options(values=self.cluster_options)
    else:
        self.cluster.set_configuration_options(values={
            'read_request_timeout_in_ms': timeout,
            'range_request_timeout_in_ms': timeout,
            'write_request_timeout_in_ms': timeout,
            'truncate_request_timeout_in_ms': timeout,
            'request_timeout_in_ms': timeout
        })
    # Record this test's cluster so the next run can clean it up after a crash.
    with open(LAST_TEST_DIR, 'w') as f:
        f.write(self.test_path + '\n')
        f.write(self.cluster.name)
    self.modify_log(self.cluster)
    self.connections = []
    self.runners = []
def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None,
                set_keyspace=True, ccm_options=None, configuration_options=None,
                dse_cluster=False, dse_options=None, dse_version=None):
    """Create (or reuse) a CCM cluster, optionally as a DSE cluster.

    Parameters:
        cluster_name: name under which the CCM cluster is created/loaded.
        nodes: topology spec forwarded to ``CCM_CLUSTER.populate``.
        ipformat: optional IP format forwarded to ``populate``/``setup_keyspace``.
        start: when True, start the cluster and wait for each node's socket.
        workloads: optional list of workloads set on every node before start.
        set_keyspace: when True (and started), create the test keyspace.
        ccm_options: CCM install options; defaults depend on dse_cluster.
        configuration_options: extra cassandra.yaml options applied to the cluster.
        dse_cluster: build a DseCluster instead of a plain CCMCluster.
        dse_options: extra dse.yaml options (DSE clusters only).
        dse_version: DSE version to install; requires dse_cluster=True.

    Raises ValueError if dse_version is given without dse_cluster.
    Returns the module-global CCM_CLUSTER (returns None when an external
    cluster is in use).
    """
    # NOTE(fix): the original signature used mutable default arguments
    # (workloads=[], configuration_options={}, dse_options={}); use None
    # sentinels instead.
    if workloads is None:
        workloads = []
    if configuration_options is None:
        configuration_options = {}
    if dse_options is None:
        dse_options = {}

    if dse_version and not dse_cluster:
        raise ValueError('specified dse_version {} but not dse_cluster'.format(dse_version))
    set_default_cass_ip()

    if ccm_options is None and dse_cluster:
        ccm_options = {"version": dse_version or DSE_VERSION}
    elif ccm_options is None:
        ccm_options = CCM_KWARGS.copy()
    cassandra_version = ccm_options.get('version', CCM_VERSION)
    # NOTE(review): this silently replaces the dse_version argument with the
    # ccm_options version when one is present — preserved as-is; confirm intended.
    dse_version = ccm_options.get('version', DSE_VERSION)
    if 'version' in ccm_options:
        # CCM expects the plain release string, not a Version object.
        ccm_options['version'] = ccm_options['version'].base_version

    global CCM_CLUSTER
    if USE_CASS_EXTERNAL:
        if CCM_CLUSTER:
            log.debug("Using external CCM cluster {0}".format(CCM_CLUSTER.name))
        else:
            log.debug("Using unnamed external cluster")
        if set_keyspace and start:
            setup_keyspace(ipformat=ipformat, wait=False)
        return

    if is_current_cluster(cluster_name, nodes):
        log.debug("Using existing cluster, matching topology: {0}".format(cluster_name))
    else:
        if CCM_CLUSTER:
            log.debug("Stopping existing cluster, topology mismatch: {0}".format(CCM_CLUSTER.name))
            CCM_CLUSTER.stop()
        try:
            # Prefer reusing an on-disk cluster with the same name.
            CCM_CLUSTER = CCMClusterFactory.load(path, cluster_name)
            log.debug("Found existing CCM cluster, {0}; clearing.".format(cluster_name))
            CCM_CLUSTER.clear()
            CCM_CLUSTER.set_install_dir(**ccm_options)
            CCM_CLUSTER.set_configuration_options(configuration_options)
        except Exception:
            # No reusable cluster (or it is broken): log why, then build anew.
            ex_type, ex, tb = sys.exc_info()
            log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
            del tb  # avoid keeping the traceback reference cycle alive
            ccm_options.update(cmd_line_args_to_dict('CCM_ARGS'))
            log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, ccm_options))
            # Make sure we cleanup old cluster dir if it exists
            cluster_path = os.path.join(path, cluster_name)
            if os.path.exists(cluster_path):
                shutil.rmtree(cluster_path)
            if dse_cluster:
                CCM_CLUSTER = DseCluster(path, cluster_name, **ccm_options)
                CCM_CLUSTER.set_configuration_options({'start_native_transport': True})
                CCM_CLUSTER.set_configuration_options({'batch_size_warn_threshold_in_kb': 5})
                if dse_version >= Version('5.0'):
                    CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
                    CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True})
                if 'spark' in workloads:
                    config_options = {"initial_spark_worker_resources": 0.1}
                    CCM_CLUSTER.set_dse_configuration_options(config_options)
                common.switch_cluster(path, cluster_name)
                CCM_CLUSTER.set_configuration_options(configuration_options)
                CCM_CLUSTER.populate(nodes, ipformat=ipformat)
                CCM_CLUSTER.set_dse_configuration_options(dse_options)
            else:
                CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options)
                CCM_CLUSTER.set_configuration_options({'start_native_transport': True})
                if cassandra_version >= Version('2.2'):
                    CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
                    if cassandra_version >= Version('3.0'):
                        CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True})
                common.switch_cluster(path, cluster_name)
                CCM_CLUSTER.set_configuration_options(configuration_options)
                CCM_CLUSTER.populate(nodes, ipformat=ipformat)

    try:
        jvm_args = []
        # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back
        if 'graph' not in workloads:
            if PROTOCOL_VERSION >= 4:
                jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"]
        if workloads:
            for node in CCM_CLUSTER.nodes.values():
                node.set_workloads(workloads)
        if start:
            log.debug("Starting CCM cluster: {0}".format(cluster_name))
            CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=jvm_args)
            # Added to wait for slow nodes to start up
            for node in CCM_CLUSTER.nodes.values():
                wait_for_node_socket(node, 120)
            if set_keyspace:
                setup_keyspace(ipformat=ipformat)
    except Exception:
        log.exception("Failed to start CCM cluster; removing cluster.")
        if os.name == "nt":
            if CCM_CLUSTER:
                for node in six.itervalues(CCM_CLUSTER.nodes):
                    os.system("taskkill /F /PID " + str(node.pid))
        else:
            call(["pkill", "-9", "-f", ".ccm"])
        remove_cluster()
        raise
    return CCM_CLUSTER
def setUp(self):
    """Per-test setup: kill stray Cassandra JVMs on Windows, clean up any
    cluster left over from an interrupted run, then build and configure a
    fresh cluster for this test."""
    global CURRENT_TEST
    CURRENT_TEST = self.id() + self._testMethodName
    # On Windows, forcefully terminate any leftover previously running cassandra processes. This is a temporary
    # workaround until we can determine the cause of intermittent hung-open tests and file-handles.
    if is_win():
        try:
            import psutil
            for proc in psutil.process_iter():
                try:
                    pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline'])
                except psutil.NoSuchProcess:
                    pass
                else:
                    # NOTE(fix): psutil's cmdline is a list of argv strings, so the
                    # original membership test ('-Dcassandra' in pinfo['cmdline'])
                    # only matched an argument that was *exactly* '-Dcassandra';
                    # substring-match each argument instead.
                    if (pinfo['name'] == 'java.exe'
                            and any('-Dcassandra' in arg for arg in pinfo['cmdline'])):
                        # NOTE(fix): print as a function for Python 2/3 compatibility.
                        print('Found running cassandra process with pid: ' + str(pinfo['pid']) + '. Killing.')
                        psutil.Process(pinfo['pid']).kill()
        except ImportError:
            debug("WARN: psutil not installed. Cannot detect and kill running cassandra processes - you may see cascading dtest failures.")
    # cleaning up if a previous execution didn't trigger tearDown (which
    # can happen if it is interrupted by KeyboardInterrupt)
    # TODO: move that part to a generic fixture
    if os.path.exists(LAST_TEST_DIR):
        with open(LAST_TEST_DIR) as f:
            self.test_path = f.readline().strip('\n')
            name = f.readline()
        try:
            self.cluster = ClusterFactory.load(self.test_path, name)
            # Avoid waiting too long for node to be marked down
            if not self._preserve_cluster:
                self._cleanup_cluster()
        except IOError:
            # after a restart, /tmp will be emptied so we'll get an IOError when loading the old cluster here
            pass
    self.cluster = self._get_cluster()
    if RECORD_COVERAGE:
        self.__setup_jacoco()
    # the failure detector can be quite slow in such tests with quick start/stop
    self.cluster.set_configuration_options(values={'phi_convict_threshold': 5})
    timeout = 10000
    if self.cluster_options is not None:
        self.cluster.set_configuration_options(values=self.cluster_options)
    else:
        self.cluster.set_configuration_options(values={
            'read_request_timeout_in_ms': timeout,
            'range_request_timeout_in_ms': timeout,
            'write_request_timeout_in_ms': timeout,
            'truncate_request_timeout_in_ms': timeout,
            'request_timeout_in_ms': timeout
        })
    # Record this test's cluster so the next run can clean it up after a crash.
    with open(LAST_TEST_DIR, 'w') as f:
        f.write(self.test_path + '\n')
        f.write(self.cluster.name)
    self.modify_log(self.cluster)
    self.connections = []
    self.runners = []
def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None):
    """Create (or reuse) a CCM cluster (DSE when DSE_VERSION is set) with the
    requested topology, optionally starting it and creating the test keyspace.

    Parameters:
        cluster_name: name under which the CCM cluster is created/loaded.
        nodes: topology spec forwarded to ``CCM_CLUSTER.populate``.
        ipformat: optional IP format forwarded to ``populate``/``setup_keyspace``.
        start: when True, start the cluster and wait for each node's socket.
        workloads: optional list of workloads set on every node before start.
    """
    # NOTE(fix): the original signature used a mutable default (workloads=[]);
    # use a None sentinel instead.
    if workloads is None:
        workloads = []

    set_default_dse_ip()
    global CCM_CLUSTER
    if USE_CASS_EXTERNAL:
        if CCM_CLUSTER:
            log.debug("Using external CCM cluster {0}".format(CCM_CLUSTER.name))
        else:
            log.debug("Using unnamed external cluster")
        setup_keyspace(ipformat=ipformat, wait=False)
        return

    if is_current_cluster(cluster_name, nodes, workloads):
        log.debug("Using existing cluster, matching topology: {0}".format(cluster_name))
    else:
        if CCM_CLUSTER:
            log.debug("Stopping existing cluster, topology mismatch: {0}".format(CCM_CLUSTER.name))
            CCM_CLUSTER.stop()
        try:
            # Prefer reusing an on-disk cluster with the same name.
            CCM_CLUSTER = CCMClusterFactory.load(path, cluster_name)
            log.debug("Found existing CCM cluster, {0}; clearing.".format(cluster_name))
            CCM_CLUSTER.clear()
            CCM_CLUSTER.set_install_dir(**CCM_KWARGS)
        except Exception:
            # No reusable cluster (or it is broken): log why, then build anew.
            ex_type, ex, tb = sys.exc_info()
            # NOTE(fix): log.warn is a deprecated alias for log.warning
            # (sibling variants of this function already use log.warning).
            log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
            del tb  # avoid keeping the traceback reference cycle alive
            log.debug("Creating new CCM cluster, {0}, with args {1}".format(cluster_name, CCM_KWARGS))
            if DSE_VERSION:
                log.error("creating dse cluster")
                CCM_CLUSTER = DseCluster(path, cluster_name, **CCM_KWARGS)
            else:
                CCM_CLUSTER = CCMCluster(path, cluster_name, **CCM_KWARGS)
            CCM_CLUSTER.set_configuration_options({'start_native_transport': True})
            CCM_CLUSTER.set_configuration_options({'batch_size_warn_threshold_in_kb': 5})
            if CASSANDRA_VERSION >= '2.2':
                CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
                if CASSANDRA_VERSION >= '3.0':
                    CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True})
            if 'spark' in workloads:
                config_options = {"initial_spark_worker_resources": 0.1}
                CCM_CLUSTER.set_dse_configuration_options(config_options)
            common.switch_cluster(path, cluster_name)
            CCM_CLUSTER.populate(nodes, ipformat=ipformat)

    try:
        jvm_args = []
        # This will enable the Mirroring query handler which will echo our custom payload k,v pairs back
        if 'graph' not in workloads:
            if PROTOCOL_VERSION >= 4:
                jvm_args = [" -Dcassandra.custom_query_handler_class=org.apache.cassandra.cql3.CustomPayloadMirroringQueryHandler"]
        if workloads:
            for node in CCM_CLUSTER.nodes.values():
                node.set_workloads(workloads)
        if start:
            log.debug("Starting CCM cluster: {0}".format(cluster_name))
            CCM_CLUSTER.start(no_wait=True, jvm_args=jvm_args)
            # Added to wait for slow nodes to start up
            log.debug("Cluster started waiting for binary ports")
            for node in CCM_CLUSTER.nodes.values():
                wait_for_node_socket(node, 120)
            log.debug("Binary port are open")
            setup_keyspace(ipformat=ipformat)
    except Exception:
        log.exception("Failed to start CCM cluster; removing cluster.")
        if os.name == "nt":
            if CCM_CLUSTER:
                # NOTE(fix): dict.itervalues() is Python-2-only; .values()
                # works on both 2 and 3.
                for node in CCM_CLUSTER.nodes.values():
                    os.system("taskkill /F /PID " + str(node.pid))
        else:
            call(["pkill", "-9", "-f", ".ccm"])
        remove_cluster()
        raise
def setUp(self):
    """Per-test setup: kill stray Cassandra JVMs on Windows, clean up any
    cluster left over from an interrupted run, then build a fresh cluster,
    optionally begin active log watching, and apply test configuration."""
    global CURRENT_TEST
    CURRENT_TEST = self.id() + self._testMethodName
    # On Windows, forcefully terminate any leftover previously running cassandra processes. This is a temporary
    # workaround until we can determine the cause of intermittent hung-open tests and file-handles.
    if is_win():
        try:
            import psutil
            for proc in psutil.process_iter():
                try:
                    pinfo = proc.as_dict(attrs=['pid', 'name', 'cmdline'])
                except psutil.NoSuchProcess:
                    pass
                else:
                    # NOTE(fix): psutil's cmdline is a list of argv strings, so the
                    # original membership test ('-Dcassandra' in pinfo['cmdline'])
                    # only matched an argument that was *exactly* '-Dcassandra';
                    # substring-match each argument instead.
                    if (pinfo['name'] == 'java.exe'
                            and any('-Dcassandra' in arg for arg in pinfo['cmdline'])):
                        # NOTE(fix): print as a function for Python 2/3 compatibility.
                        print('Found running cassandra process with pid: ' + str(pinfo['pid']) + '. Killing.')
                        psutil.Process(pinfo['pid']).kill()
        except ImportError:
            debug("WARN: psutil not installed. Cannot detect and kill running cassandra processes - you may see cascading dtest failures.")
    # cleaning up if a previous execution didn't trigger tearDown (which
    # can happen if it is interrupted by KeyboardInterrupt)
    # TODO: move that part to a generic fixture
    if os.path.exists(LAST_TEST_DIR):
        with open(LAST_TEST_DIR) as f:
            self.test_path = f.readline().strip('\n')
            name = f.readline()
        try:
            self.cluster = ClusterFactory.load(self.test_path, name)
            # Avoid waiting too long for node to be marked down
            if not self._preserve_cluster:
                self._cleanup_cluster()
        except IOError:
            # after a restart, /tmp will be emptied so we'll get an IOError when loading the old cluster here
            pass
    self.cluster = self._get_cluster()
    if ENABLE_ACTIVE_LOG_WATCHING:
        # Tests that expect log errors opt out of active watching.
        if not self.allow_log_errors:
            self.begin_active_log_watch()
    if RECORD_COVERAGE:
        self.__setup_jacoco()
    try:
        self.init_config()
    except NotImplementedError:
        debug("Custom init_config not found. Setting defaults.")
        self.init_default_config()
    # Record this test's cluster so the next run can clean it up after a crash.
    with open(LAST_TEST_DIR, 'w') as f:
        f.write(self.test_path + '\n')
        f.write(self.cluster.name)
    self.modify_log(self.cluster)
    self.connections = []
    self.runners = []
if '-' in version_string: version_string = version_string[:version_string.index('-')] return tuple([int(p) for p in version_string.split('.')]) USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False)) default_cassandra_version = '2.1.3' if USE_CASS_EXTERNAL: if CCMClusterFactory: # see if the external instance is running in ccm path = common.get_default_path() name = common.current_cluster_name(path) CCM_CLUSTER = CCMClusterFactory.load(common.get_default_path(), name) CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True) # Not sure what's going on, but the server version query # hangs in python3. This appears to be related to running inside of # nosetests, and only for this query that would run while loading the # module. # This is a hack to make it run with default cassandra version for PY3. # Not happy with it, but need to move on for now. if not six.PY3: cass_ver, _ = get_server_versions() default_cassandra_version = '.'.join('%d' % i for i in cass_ver) else: if not os.getenv('CASSANDRA_VERSION'): log.warning( "Using default C* version %s because external server cannot be queried"