def set_log_level(self, new_level, class_names=None):
    class_names = class_names or []
    known_level = ['TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR', 'OFF']
    if new_level not in known_level:
        raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(known_level)))

    if class_names:
        # Per-class levels: a class may be tracked at DEBUG or TRACE, but not both.
        for class_name in class_names:
            if new_level == 'DEBUG':
                if class_name in self._trace:
                    raise common.ArgumentError("Class %s already in TRACE" % (class_name))
                self._debug.append(class_name)
            if new_level == 'TRACE':
                if class_name in self._debug:
                    raise common.ArgumentError("Class %s already in DEBUG" % (class_name))
                self._trace.append(class_name)
    else:
        # No classes given: change the cluster-wide level.
        self.__log_level = new_level
        self._update_config()

    for node in self.nodelist():
        for class_name in class_names:
            node.set_log_level(new_level, class_name)
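# Usage sketch (hypothetical; assumes `cluster` is an instance of the class
# above, already populated). The cluster-wide level and per-class levels are
# set through the same call, keyed on whether class_names is given:
#
#   cluster.set_log_level('DEBUG')  # cluster-wide
#   cluster.set_log_level('TRACE', class_names=['org.apache.cassandra.gms.Gossiper'])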
def add(self, node, is_seed, data_center=None):
    if node.name in self.nodes:
        raise common.ArgumentError('Cannot create existing node %s' % node.name)
    self.nodes[node.name] = node
    if is_seed:
        self.seeds.append(node)
    self._update_config()
    node.data_center = data_center
    # A node without an explicit DC is only allowed in a cluster where no
    # other node has one either.
    if data_center is None:
        for existing_node in self.nodelist():
            if existing_node.data_center is not None:
                raise common.ArgumentError('Please specify the DC this node should be added to')
    node.set_log_level(self.__log_level)

    # Replay any per-class log levels the cluster has accumulated.
    for debug_class in self._debug:
        node.set_log_level("DEBUG", debug_class)
    for trace_class in self._trace:
        node.set_log_level("TRACE", trace_class)

    if data_center is not None:
        self.__update_topology_files()
    node._save()
    return self
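# Usage sketch (hypothetical; `cluster` and `node3` are illustrative names).
# add() registers the node, propagates log levels onto it, and persists it:
#
#   cluster.add(node3, is_seed=False, data_center='dc2')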
def populate(self, nodes, debug=False, tokens=None, use_vnodes=False, ipprefix='127.0.0.', ipformat=None, install_byteman=False):
    node_count = nodes
    dcs = []
    self.use_vnodes = use_vnodes
    if isinstance(nodes, list):
        # A list means "nodes per datacenter"; a multi-DC cluster needs the
        # PropertyFileSnitch so nodes can be mapped to their DCs.
        self.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
        node_count = 0
        i = 0
        for c in nodes:
            i = i + 1
            node_count = node_count + c
            for x in xrange(0, c):
                dcs.append('dc%d' % i)

    if node_count < 1:
        raise common.ArgumentError('invalid node count %s' % nodes)

    for i in xrange(1, node_count + 1):
        # node names are the keys of self.nodes
        if 'node%s' % i in self.nodes:
            raise common.ArgumentError('Cannot create existing node node%s' % i)

    if tokens is None and not use_vnodes:
        if len(dcs) <= 1:
            tokens = self.balanced_tokens(node_count)
        else:
            tokens = self.balanced_tokens_across_dcs(dcs)

    if not ipformat:
        ipformat = ipprefix + "%d"

    for i in xrange(1, node_count + 1):
        tk = None
        if tokens is not None and i - 1 < len(tokens):
            tk = tokens[i - 1]
        dc = dcs[i - 1] if i - 1 < len(dcs) else None

        binary = None
        if self.cassandra_version() >= '1.2':
            # the native protocol (port 9042) only exists from 1.2 on
            binary = (ipformat % i, 9042)

        node = self.create_node(name='node%s' % i,
                                auto_bootstrap=False,
                                thrift_interface=(ipformat % i, 9160),
                                storage_interface=(ipformat % i, 7000),
                                jmx_port=str(7000 + i * 100),
                                remote_debug_port=str(2000 + i * 100) if debug else str(0),
                                byteman_port=str(4000 + i * 100) if install_byteman else str(0),
                                initial_token=tk,
                                binary_interface=binary,
                                environment_variables=self._environment_variables)
        self.add(node, True, dc)
    self._update_config()
    return self
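# Usage sketch (hypothetical `cluster` instance). An int creates a single-DC
# cluster; a list creates one DC per entry:
#
#   cluster.populate(3)                     # node1..node3 in one DC
#   cluster.populate([2, 2], debug=True)    # dc1 and dc2, two nodes each,
#                                           # remote debug ports enabled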
def populate(self, nodes, debug=False, tokens=None, use_vnodes=False, ipprefix='127.0.0.'):
    node_count = nodes
    dcs = []
    if isinstance(nodes, list):
        self.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
        node_count = 0
        i = 0
        for c in nodes:
            i = i + 1
            node_count = node_count + c
            for x in xrange(0, c):
                dcs.append('dc%d' % i)

    if node_count < 1:
        raise common.ArgumentError('invalid node count %s' % nodes)

    for i in xrange(1, node_count + 1):
        if 'node%s' % i in self.nodes:
            raise common.ArgumentError('Cannot create existing node node%s' % i)

    if tokens is None and not use_vnodes:
        tokens = self.balanced_tokens(node_count)

    for i in xrange(1, node_count + 1):
        tk = None
        if tokens is not None and i - 1 < len(tokens):
            tk = tokens[i - 1]
        dc = dcs[i - 1] if i - 1 < len(dcs) else None

        binary = None
        if self.version() >= '1.2':
            binary = ('%s%s' % (ipprefix, i), 9042)
        node = Node('node%s' % i,
                    self,
                    False,
                    ('%s%s' % (ipprefix, i), 9160),
                    ('%s%s' % (ipprefix, i), 7000),
                    str(7000 + i * 100),
                    str(2000 + i * 100) if debug else str(0),
                    tk,
                    binary_interface=binary)
        self.add(node, True, dc)
    self.__update_config()
    return self
def populate(self, nodes, debug=False, tokens=None, use_vnodes=False, ipprefix=None, ipformat=None):
    # Remember the address prefix/format on the cluster, falling back to
    # loopback defaults the first time populate() is called.
    if ipprefix:
        self.ipprefix = ipprefix
    elif not self.ipprefix:
        self.ipprefix = '127.0.0.'
    if ipformat:
        self.ipformat = ipformat
    elif not self.ipformat:
        self.ipformat = self.ipprefix + "%d"

    node_count = nodes
    dcs = []
    self.use_vnodes = use_vnodes
    if isinstance(nodes, list):
        self.set_configuration_options(values={'endpoint_snitch': self.snitch})
        node_count = 0
        i = 0
        for c in nodes:
            i = i + 1
            node_count = node_count + c
            for x in xrange(0, c):
                dcs.append('dc%d' % i)

    if node_count < 1:
        raise common.ArgumentError('invalid node count %s' % nodes)

    for i in xrange(1, node_count + 1):
        if 'node%s' % i in self.nodes:
            raise common.ArgumentError('Cannot create existing node node%s' % i)

    if tokens is None and not use_vnodes:
        if len(dcs) <= 1:
            tokens = self.balanced_tokens(node_count)
        else:
            tokens = self.balanced_tokens_across_dcs(dcs)

    for i in xrange(1, node_count + 1):
        tk = None
        if tokens is not None and i - 1 < len(tokens):
            tk = tokens[i - 1]
        dc = dcs[i - 1] if i - 1 < len(dcs) else None
        self.new_node(i, debug=debug, initial_token=tk, data_center=dc)

    self._update_config()
    return self
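# Usage sketch (hypothetical): this variant delegates node construction to
# self.new_node() and remembers the address scheme across calls, so a later
# populate() on the same cluster reuses the earlier ipprefix/ipformat:
#
#   cluster.populate(2, ipprefix='127.0.1.')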
def run_cli(self, cmds=None, show_output=False, cli_options=None):
    if cli_options is None:
        cli_options = []
    livenodes = [node for node in list(self.nodes.values()) if node.is_live()]
    if len(livenodes) == 0:
        raise common.ArgumentError("No live node")
    livenodes[0].run_cli(cmds, show_output, cli_options)
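# Usage sketch (hypothetical): commands are forwarded to the first live node's
# CLI:
#
#   cluster.run_cli(cmds='use ks1; show schema;', show_output=True)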
def nodetool(self, cmd, username=None, password=None, capture_output=True, wait=True):
    """
    Setting wait=False allows us to return while nodetool is still running,
    but it makes errors impossible to detect when capture_output is also
    False; capture_output together with wait=False is therefore rejected.
    """
    if capture_output and not wait:
        raise common.ArgumentError("Cannot set capture_output while wait is False.")
    env = self.get_env()
    nodetool = common.join_bin(self.get_install_dir(), 'bin', 'nodetool')
    args = [nodetool, '-h', 'localhost', '-p', str(self.jmx_port)]
    if username is not None:
        args += ['-u', username]
    if password is not None:
        args += ['-pw', password]
    args += cmd.split()
    if capture_output:
        p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
    else:
        p = subprocess.Popen(args, env=env)
        stdout, stderr = None, None
    if wait:
        exit_status = p.wait()
        if exit_status != 0:
            raise NodetoolError(" ".join(args), exit_status, stdout, stderr)
    return stdout, stderr
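# Usage sketch (hypothetical `node` instance). capture_output=True (the
# default) returns nodetool's stdout/stderr; a non-zero exit raises
# NodetoolError:
#
#   stdout, stderr = node.nodetool('status')
#   node.nodetool('compact ks1 cf1', username='cassandra', password='cassandra')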
def set_log_level(self, new_level, class_name=None): known_level = [ 'TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR' ] if new_level not in known_level: raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(known_level))) self.__log_level = new_level self.__update_config() for node in self.nodelist(): node.set_log_level(new_level, class_name)
def generate_dc_tokens(self, node_count, tokens):
    if self.cassandra_version() < '4' or (self.partitioner and not ('Murmur3' in self.partitioner or 'Random' in self.partitioner)):
        raise common.ArgumentError("generate-tokens script only for >=4.0 and Murmur3 or Random")
    if not ('num_tokens' in self._config_options and self._config_options['num_tokens'] is not None and int(self._config_options['num_tokens']) > 1):
        raise common.ArgumentError("Cannot use generate-tokens script without num_tokens > 1")

    partitioner = 'RandomPartitioner' if (self.partitioner and 'Random' in self.partitioner) else 'Murmur3Partitioner'
    generate_tokens = common.join_bin(self.get_install_dir(), os.path.join('tools', 'bin'), 'generatetokens')
    cmd_list = [generate_tokens,
                '-n', str(node_count),
                '-t', str(self._config_options.get("num_tokens")),
                '--rf', str(min(3, node_count)),
                '-p', partitioner]
    process = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy())
    # the first line is "Generating tokens for X nodes with ..." and can be ignored
    process.stdout.readline()
    for n in range(1, node_count + 1):
        # strip the leading "label:" prefix and the list punctuation from each
        # node's line, leaving a comma-separated token string
        stdout_output = re.sub(r'^.*?:', '', process.stdout.readline().decode("utf-8"))
        node_tokens = stdout_output.replace('[', '').replace(' ', '').replace(']', '').replace('\n', '')
        tokens.append(node_tokens)

    common.debug("pregenerated tokens from cmd_list: {} are {}".format(str(cmd_list), tokens))
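# Usage sketch (hypothetical): the caller passes in a list that receives one
# comma-separated token string per node:
#
#   tokens = []
#   cluster.generate_dc_tokens(3, tokens)
#   # tokens now holds three entries, one token string per node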
def set_log_level(self, new_level, class_name=None):
    known_level = {'TRACE': 'trace', 'DEBUG': 'debug', 'INFO': 'info', 'WARN': 'warn', 'ERROR': 'error', 'OFF': 'info'}
    if new_level not in known_level:  # dict.has_key() no longer exists in Python 3
        raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(known_level)))

    new_log_level = known_level[new_level]
    # TODO: class_name can be validated against help-loggers
    if class_name:
        self.__classes_log_level[class_name] = new_log_level
    else:
        self.__global_log_level = new_log_level
    return self
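# Usage sketch (hypothetical `node` instance): this variant maps the public
# level names onto lowercase logger levels and stores them for a later
# config rewrite:
#
#   node.set_log_level('WARN')                                      # global level
#   node.set_log_level('TRACE', 'org.apache.cassandra.transport')   # one class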
def add(self, node, is_seed, data_center=None):
    if node.name in self.nodes:
        raise common.ArgumentError('Cannot create existing node %s' % node.name)
    self.nodes[node.name] = node
    if is_seed:
        self.seeds.append(node)
    self.__update_config()
    node.data_center = data_center
    node.set_log_level(self.__log_level)
    node._save()
    if data_center is not None:
        self.__update_topology_files()
    return self
def get_sstables(self, keyspace, column_family):
    keyspace_dir = os.path.join(self.get_path(), 'data', keyspace)
    if not os.path.exists(keyspace_dir):
        raise common.ArgumentError("Unknown keyspace {0}".format(keyspace))

    version = self.cluster.version()
    # the data directory layout changed in 1.1
    if float(version[:version.index('.') + 2]) < 1.1:
        files = glob.glob(os.path.join(keyspace_dir, "{0}*-Data.db".format(column_family)))
    else:
        files = glob.glob(os.path.join(keyspace_dir, column_family or "*", "%s-%s*-Data.db" % (keyspace, column_family)))
    # drop sstables that have already been compacted away; build a new list
    # rather than removing from `files` while iterating over it
    return [f for f in files if not os.path.exists(f.replace('Data.db', 'Compacted'))]
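# Usage sketch (hypothetical): returns the live -Data.db files for one table,
# e.g. to feed into sstable tooling:
#
#   sstables = node.get_sstables('ks1', 'cf1')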
def set_log_level(self, new_level, class_name=None): known_level = [ 'TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR' ] if new_level not in known_level: raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(known_level))) if class_name: self.__classes_log_level[class_name] = new_level else: self.__global_log_level = new_level version = self.cluster.version() #loggers changed > 2.1 if float(version[:version.index('.')+2]) < 2.1: self.__update_log4j() else: self.__update_logback() return self
def bulkload(self, options):
    livenodes = [node for node in self.nodes.values() if node.is_live()]
    if not livenodes:
        raise common.ArgumentError("No live node")
    random.choice(livenodes).bulkload(options)
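# Usage sketch (hypothetical; the flags are illustrative sstableloader-style
# options): the options list is passed through to a randomly chosen live node:
#
#   cluster.bulkload(['-d', '127.0.0.1', '/path/to/ks1/cf1'])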
def set_dse_configuration_options(self, values=None):
    raise common.ArgumentError('Cannot set DSE configuration options on a Cassandra cluster')
def populate(self, nodes, debug=False, tokens=None, use_vnodes=None, ipprefix='127.0.0.', ipformat=None, install_byteman=False, use_single_interface=False):
    """Populate a cluster with nodes

    @use_single_interface : Populate the cluster with nodes that all share
    a single network interface.
    """
    if self.cassandra_version() < '4' and use_single_interface:
        raise common.ArgumentError('use_single_interface is not supported in versions < 4.0')

    node_count = nodes
    dcs = []

    if use_vnodes is None:
        self.use_vnodes = len(tokens or []) > 1 or self._more_than_one_token_configured()
    else:
        self.use_vnodes = use_vnodes

    if isinstance(nodes, list):
        self.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
        node_count = 0
        i = 0
        for c in nodes:
            i = i + 1
            node_count = node_count + c
            for x in xrange(0, c):
                dcs.append('dc%d' % i)

    if node_count < 1:
        raise common.ArgumentError('invalid node count %s' % nodes)

    for i in xrange(1, node_count + 1):
        if 'node%s' % i in self.nodes:
            raise common.ArgumentError('Cannot create existing node node%s' % i)

    if tokens is None:
        if self.use_vnodes:
            # from 4.0 tokens can be pre-generated via the `allocate_tokens_for_local_replication_factor: 3` strategy
            # this saves time, as allocating tokens during first start is slow and non-concurrent
            if self.can_generate_tokens() and 'CASSANDRA_TOKEN_PREGENERATION_DISABLED' not in self._environment_variables:
                if len(dcs) <= 1:
                    for x in xrange(0, node_count):
                        dcs.append('dc1')
                tokens = self.generated_tokens(dcs)
        else:
            common.debug("using balanced tokens for non-vnode cluster")
            if len(dcs) <= 1:
                tokens = self.balanced_tokens(node_count)
            else:
                tokens = self.balanced_tokens_across_dcs(dcs)

    if not ipformat:
        ipformat = ipprefix + "%d"

    for i in xrange(1, node_count + 1):
        tk = None
        if tokens is not None and i - 1 < len(tokens):
            tk = tokens[i - 1]
        dc = dcs[i - 1] if i - 1 < len(dcs) else None

        binary = None
        if self.cassandra_version() >= '1.2':
            if use_single_interface:
                # Always leave 9042 and 9043 clear, in case someone defaults
                # to adding a node with those ports
                binary = (ipformat % 1, 9042 + 2 + (i * 2))
            else:
                binary = (ipformat % i, 9042)

        thrift = None
        if self.cassandra_version() < '4':
            thrift = (ipformat % i, 9160)

        storage_interface = ((ipformat % i), 7000)
        if use_single_interface:
            # Always leave 7000 and 7001 clear, in case someone defaults to
            # adding a node with those port numbers
            storage_interface = (ipformat % 1, 7000 + 2 + (i * 2))

        node = self.create_node(name='node%s' % i,
                                auto_bootstrap=False,
                                thrift_interface=thrift,
                                storage_interface=storage_interface,
                                jmx_port=str(7000 + i * 100),
                                remote_debug_port=str(2000 + i * 100) if debug else str(0),
                                byteman_port=str(4000 + i * 100) if install_byteman else str(0),
                                initial_token=tk,
                                binary_interface=binary,
                                environment_variables=self._environment_variables)
        self.add(node, True, dc)

    self._update_config()
    return self
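# Usage sketch (hypothetical): with use_single_interface=True every node binds
# to the same address and gets distinct, spaced-out ports instead:
#
#   cluster.populate(3, use_single_interface=True)
#   # node1: binary port 9046, storage port 7004; node2: 9048/7006; node3: 9050/7008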