def download_version(version, url=None, verbose=False): u = "%s/%s/apache-cassandra-%s-src.tar.gz" % ( ARCHIVE, version.split('-')[0], version) if url is None else url _, target = tempfile.mkstemp(suffix=".tar.gz", prefix="ccm-") try: __download(u, target, show_progress=verbose) if verbose: print "Extracting %s as version %s ..." % (target, version) tar = tarfile.open(target) dir = tar.next().name.split("/")[0] tar.extractall(path=__get_dir()) tar.close() target_dir = os.path.join(__get_dir(), version) if os.path.exists(target_dir): shutil.rmtree(target_dir) shutil.move(os.path.join(__get_dir(), dir), target_dir) compile_version(version, target_dir, verbose=verbose) except urllib2.URLError as e: msg = "Invalid version %s" % version if url is None else "Invalid url %s" % url msg = msg + " (underlying error is: %s)" % str(e) raise common.ArgumentError(msg) except tarfile.ReadError as e: raise common.ArgumentError("Unable to uncompress downloaded file: %s" % str(e))
def populate(self, nodes, debug=False, tokens=None, use_vnodes=False, ipprefix='127.0.0.'):
    """Create and register `nodes` nodes in this cluster.

    `nodes` is either an int (total node count) or a list of per-datacenter
    counts; the list form switches the cluster to PropertyFileSnitch and
    assigns nodes to 'dc1', 'dc2', ... in order.  Unless `use_vnodes` is
    set and no `tokens` are supplied, balanced initial tokens are computed.
    Node i listens on '<ipprefix>i' (thrift 9160, storage 7000, and native
    9042 from Cassandra 1.2 on).  When `debug` is true, node i gets remote
    debug port 2000 + i*100.  Returns self for chaining.

    Raises common.ArgumentError for a non-positive node count or when a
    node of the same name already exists.
    """
    node_count = nodes
    dcs = []
    if isinstance(nodes, list):
        self.set_configuration_options(
            values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
        node_count = 0
        i = 0
        for c in nodes:
            i = i + 1
            node_count = node_count + c
            for x in xrange(0, c):
                dcs.append('dc%d' % i)
    if node_count < 1:
        raise common.ArgumentError('invalid node count %s' % nodes)
    for i in xrange(1, node_count + 1):
        # BUG FIX: the original tested membership against self.nodes.values()
        # (Node objects), so a name string could never match and the
        # duplicate-node guard never fired; node names are the dict keys.
        if 'node%s' % i in self.nodes:
            raise common.ArgumentError('Cannot create existing node node%s' % i)
    if tokens is None and not use_vnodes:
        tokens = self.balanced_tokens(node_count)
    for i in xrange(1, node_count + 1):
        tk = None
        if tokens is not None and i - 1 < len(tokens):
            tk = tokens[i - 1]
        dc = dcs[i - 1] if i - 1 < len(dcs) else None
        binary = None
        # NOTE(review): lexicographic version compare ('1.10' < '1.2'); kept
        # as-is since other blocks use the same convention — confirm upstream.
        if self.version() >= '1.2':
            binary = ('%s%s' % (ipprefix, i), 9042)
        # Clearer equivalent of the original (str(0), str(...))[debug == True]
        remote_debug_port = str(2000 + i * 100) if debug else str(0)
        node = Node('node%s' % i,
                    self,
                    False,
                    ('%s%s' % (ipprefix, i), 9160),
                    ('%s%s' % (ipprefix, i), 7000),
                    str(7000 + i * 100),
                    remote_debug_port,
                    tk,
                    binary_interface=binary)
        self.add(node, True, dc)
    self.__update_config()
    return self
def set_log_level(self, new_level):
    """Set the cluster-wide log level and rewrite the log4j config.

    `new_level` must be one of TRACE/DEBUG/INFO/WARN/ERROR; any other
    value raises common.ArgumentError.  Returns self for chaining.
    """
    valid_levels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')
    if new_level not in valid_levels:
        raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(valid_levels)))
    self.__log_level = new_level
    self.__update_log4j()
    return self
def set_log_level(self, new_level, class_name=None):
    """Set the cluster log level and propagate it to every node.

    `new_level` must be one of TRACE/DEBUG/INFO/WARN/ERROR; any other
    value raises common.ArgumentError.  `class_name`, when given, is
    forwarded to each node so only that logger class is affected on the
    node side.  Returns self for chaining.
    """
    known_levels = ['TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR']
    if new_level not in known_levels:
        raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(known_levels)))
    self.__log_level = new_level
    self.__update_config()
    for node in self.nodelist():
        node.set_log_level(new_level, class_name)
    # CONSISTENCY FIX: the sibling set_log_level implementations all
    # return self for chaining; this one originally returned None.
    return self
def set_log_level(self, new_level, class_name=None):
    """Set the global log level, or the level of one logger class.

    `new_level` must be one of TRACE/DEBUG/INFO/WARN/ERROR; anything else
    raises common.ArgumentError.  With a truthy `class_name` only that
    class's level is recorded; otherwise the global level is updated.
    The log4j config is rewritten either way.  Returns self for chaining.
    """
    accepted = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')
    if new_level not in accepted:
        raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(accepted)))
    if not class_name:
        # no class given: change the cluster-wide level
        self.__global_log_level = new_level
    else:
        self.__classes_log_level[class_name] = new_level
    self.__update_log4j()
    return self
def add(self, node, is_seed, data_center=None):
    """Register an already-constructed node with this cluster.

    Records the node under its name (optionally as a seed), rewrites the
    cluster config, stamps the node with `data_center`, applies the
    cluster's current log level, and persists the node.  Topology files
    are regenerated only when a data center was given.  Returns self for
    chaining.  Raises common.ArgumentError when a node of the same name
    is already registered.
    """
    name = node.name
    if name in self.nodes:
        raise common.ArgumentError('Cannot create existing node %s' % name)
    self.nodes[name] = node
    if is_seed:
        self.seeds.append(node)
    self.__update_config()
    # configure the node itself after the cluster config is written
    node.data_center = data_center
    node.set_log_level(self.__log_level)
    node._save()
    if data_center is not None:
        self.__update_topology_files()
    return self
def get_sstables(self, keyspace, column_family):
    """Return the live (non-compacted) sstable Data files for a column family.

    Looks under this node's data directory, using the flat pre-1.1 layout
    or the per-column-family directory layout from 1.1 on.  Files whose
    '...-Compacted' marker exists are excluded.

    Raises common.ArgumentError if the keyspace directory does not exist.
    """
    keyspace_dir = os.path.join(self.get_path(), 'data', keyspace)
    if not os.path.exists(keyspace_dir):
        raise common.ArgumentError("Unknown keyspace {0}".format(keyspace))
    version = self.cluster.version()
    # data directory layout is changed from 1.1
    if float(version[:version.index('.') + 2]) < 1.1:
        files = glob.glob(os.path.join(keyspace_dir, "{0}*-Data.db".format(column_family)))
    else:
        files = glob.glob(os.path.join(keyspace_dir, column_family or "*", "%s-%s*-Data.db" % (keyspace, column_family)))
    # BUG FIX: the original removed entries from `files` while iterating
    # over it, which skips the element after every removal and could
    # return compacted sstables.  Build a filtered list instead.
    return [f for f in files
            if not os.path.exists(f.replace('Data.db', 'Compacted'))]
def set_log_level(self, new_level, class_name=None):
    """Change this node's logging level, globally or for one logger class.

    `new_level` must be one of TRACE/DEBUG/INFO/WARN/ERROR; anything else
    raises common.ArgumentError.  A truthy `class_name` changes only that
    logger's level, otherwise the global level is set.  The appropriate
    logging config is then rewritten — log4j before Cassandra 2.1,
    logback from 2.1 on.  Returns self for chaining.
    """
    valid = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')
    if new_level not in valid:
        raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(valid)))
    if class_name:
        self.__classes_log_level[class_name] = new_level
    else:
        self.__global_log_level = new_level
    # Cassandra switched its logging backend from log4j to logback in 2.1
    version = self.cluster.version()
    major_minor = float(version[:version.index('.') + 2])
    if major_minor < 2.1:
        self.__update_log4j()
    else:
        self.__update_logback()
    return self
def run_cli(self, cmds=None, show_output=False, cli_options=None):
    """Run cassandra-cli on the first live node of the cluster.

    `cmds`, `show_output` and `cli_options` are forwarded to the node's
    run_cli.  Raises common.ArgumentError when no node is live.
    """
    # BUG FIX: the original used a mutable default argument ([]); use a
    # None sentinel and substitute a fresh list to keep behavior identical.
    if cli_options is None:
        cli_options = []
    livenodes = [node for node in self.nodes.values() if node.is_live()]
    if not livenodes:
        raise common.ArgumentError("No live node")
    livenodes[0].run_cli(cmds, show_output, cli_options)