def update_orchestrator(self, ref_node=None, retry=5):
    """Locate the cluster orchestrator node and cache it on ``self.orchestrator``.

    Asks ``ref_node`` (default: first known node) via ``diag_eval`` which
    Erlang node currently registers ``ns_orchestrator``, then matches the
    reported host:port against ``self.nodes``.  If the diag_eval query
    fails, waits 5s and retries against a randomly chosen node, up to
    ``retry`` times.

    :param ref_node: node to query; defaults to ``self.nodes[0]``
    :param retry: remaining retry attempts on a failed query
    """
    if not self.nodes:
        # no nodes known yet -- nothing to query
        return
    if ref_node is None:
        ref_node = self.nodes[0]
    rest = create_rest(server_ip=ref_node.ip, port=ref_node.port)
    command = "node(global:whereis_name(ns_orchestrator))"
    status, content = rest.diag_eval(command)
    if status:
        # response looks like "'ns_1@10.1.2.3'"; strip the erlang node
        # prefix and quotes, then split off an optional ":port" suffix
        parts = re.sub(r".*@", "", content).strip("'").split(':')
        orchestrator_ip = parts[0]
        orchestrator_port = parts[1] if len(parts) > 1 else cfg.COUCHBASE_PORT
        # look up matching node in self.nodes
        for node in self.nodes:
            if node.ip == orchestrator_ip and \
                    int(node.port) == int(orchestrator_port):
                self.orchestrator = node
                break
    elif retry > 0:
        # wait, then retry against a randomly selected node
        time.sleep(5)
        ref_node = random.choice(self.nodes)
        return self.update_orchestrator(ref_node, retry - 1)
def resource_monitor():
    """Collect one stats sample (atop, cbstats, disk usage) from every
    cluster node and fold it into that node's cached NodeStats object.

    Always returns True.
    """
    rest = create_rest()
    nodes = rest.node_statuses()
    # cache sample of latest stats on all nodes
    for node in nodes:
        # py2: normalize unicode IPs to str for the shell-based helpers
        if isinstance(node.ip, unicode):
            node.ip = str(node.ip)
        # check if atop running (could be new node); restart it if needed
        if check_atop_proc(node.ip):
            restart_atop(node.ip)
        # retrieve stats from cache, creating a fresh record for new nodes
        node_stats = NodeStats.from_cache(node.ip)
        if node_stats is None:
            node_stats = NodeStats(node.ip)
        # get stats from node
        sample = get_atop_sample(node.ip)
        # update collection with cbstats
        sample.update(get_cbstat_sample(node.ip))
        # update collection with disk-usage (du) stats
        sample.update(get_du_sample(node.ip))
        # update node state object
        update_node_stats(node_stats, sample)
    return True
def update_orchestrator(self, ref_node=None, retry=5):
    """Resolve which cluster node is running ns_orchestrator and store
    it on ``self.orchestrator``; retry against random nodes on failure."""
    if len(self.nodes) > 0:
        target = ref_node if ref_node is not None else self.nodes[0]
        rest = create_rest(server_ip=target.ip, port=target.port)
        status, content = rest.diag_eval(
            "node(global:whereis_name(ns_orchestrator))")
        if status == True:
            # reply is like "'ns_1@host[:port]'" -- keep only host[:port]
            fields = re.sub(r".*@", "", content).strip("'").split(':')
            orch_ip = fields[0]
            if len(fields) > 1:
                orch_port = fields[1]
            else:
                orch_port = cfg.COUCHBASE_PORT
            # find the matching entry among our known nodes
            for candidate in self.nodes:
                same_endpoint = (candidate.ip == orch_ip
                                 and int(candidate.port) == int(orch_port))
                if same_endpoint:
                    self.orchestrator = candidate
                    break
        elif retry > 0:
            # back off, then ask a randomly picked node instead
            time.sleep(5)
            idx = random.randint(0, len(self.nodes) - 1)
            return self.update_orchestrator(self.nodes[idx], retry - 1)
def __init__(self, bucket="default",
             addr=cfg.COUCHBASE_IP + ":" + cfg.COUCHBASE_PORT,
             username=cfg.COUCHBASE_USER, password=cfg.COUCHBASE_PWD):
    """Remember the target bucket/endpoint credentials and build a REST
    client bound to that endpoint.

    :param bucket: bucket name to operate on
    :param addr: "host:port" endpoint string
    :param username: REST username
    :param password: REST password
    """
    self.bucket = bucket
    self.username = username
    self.password = password
    # endpoint comes in as "host:port"
    self.ip, self.port = addr.split(":")
    # REST handle bound to this node
    self.rest = create_rest(self.ip, self.port, self.username, self.password)
def __init__(self, addr, bucket="default", username="******", password="******"):
    """Record the "host:port" target plus credentials and open a REST
    client against it.

    :param addr: "host:port" endpoint string (required)
    :param bucket: bucket name, defaults to "default"
    :param username: REST username
    :param password: REST password
    """
    self.bucket = bucket
    self.username = username
    self.password = password
    # split the endpoint into its host and port components
    self.ip, self.port = addr.split(":")
    self.rest = create_rest(self.ip, self.port, self.username, self.password)
def __init__(self, bucket="default",
             addr=cfg.COUCHBASE_IP + ":" + cfg.COUCHBASE_PORT,
             username=cfg.COUCHBASE_USER, password=cfg.COUCHBASE_PWD):
    """Store bucket/endpoint/credential settings and create a REST client.

    :param bucket: bucket name to operate on
    :param addr: "host:port" endpoint; defaults to the configured cluster
    :param username: REST username
    :param password: REST password
    """
    # endpoint arrives as "host:port"
    self.ip, self.port = addr.split(":")
    self.username = username
    self.password = password
    self.bucket = bucket
    # REST handle bound to this endpoint
    self.rest = create_rest(self.ip, self.port, self.username, self.password)
def atop_log_rollover():
    """Roll over atop logs on every node in the cluster.

    Task intended to run periodically (every 3 hours).  If atop is
    currently running on a node it is stopped first; the node's logs are
    then backed up and a fresh atop instance is started.
    """
    # informational event, not an error -- was logger.error by mistake
    logger.info("Rolling over logs")
    rest = create_rest()
    nodes = rest.node_statuses()
    for node in nodes:
        # stop a live atop so its log can be rotated safely
        if check_atop_proc(node.ip):
            stop_atop(node.ip)
        backup_log(node.ip)
        start_atop(node.ip)
def node_rest(self, node=None):
    """Build a REST client for ``node``, falling back to the orchestrator,
    or to the statically configured cluster endpoint when no orchestrator
    is known.

    NOTE: when ``self.orchestrator`` is None the explicit ``node``
    argument is not consulted -- the configured default endpoint wins.
    """
    creds = {'username': cfg.COUCHBASE_USER,
             'password': cfg.COUCHBASE_PWD}
    if self.orchestrator is None:
        endpoint = (cfg.COUCHBASE_IP, cfg.COUCHBASE_PORT)
    elif node is None:
        endpoint = (self.orchestrator.ip, self.orchestrator.port)
    else:
        endpoint = (node.ip, node.port)
    creds['server_ip'], creds['port'] = endpoint
    return create_rest(**creds)
def node_rest(self, node=None):
    """Return a REST client for ``node``; default to the orchestrator,
    or to the statically configured endpoint when no orchestrator is set.

    :param node: optional node object with ``ip``/``port`` attributes
    :return: REST client bound to the chosen endpoint
    """
    rest = None
    args = {'username': cfg.COUCHBASE_USER,
            'password': cfg.COUCHBASE_PWD}
    # precedence: configured default (no orchestrator yet) > orchestrator
    # > explicit node argument
    if self.orchestrator is None:
        ip, port = cfg.COUCHBASE_IP, cfg.COUCHBASE_PORT
    elif node is None:
        ip, port = self.orchestrator.ip, self.orchestrator.port
    else:
        ip, port = node.ip, node.port
    args['server_ip'] = ip
    args['port'] = port
    rest = create_rest(**args)
    return rest
def update_orchestrator(self):
    """Single-shot variant: ask the first known node which Erlang node
    registers ns_orchestrator and cache the matching node object on
    ``self.orchestrator``.  No retry on failure.
    """
    if len(self.nodes) > 0:
        ref_node = self.nodes[0]
        address = {'server_ip': ref_node.ip, 'port': ref_node.port}
        rest = create_rest(**address)
        command = "node(global:whereis_name(ns_orchestrator))"
        status, content = rest.diag_eval(command)
        if status == True:
            # reply looks like "'ns_1@host[:port]'": drop the erlang node
            # prefix and quotes, then split an optional ":port" suffix
            content = re.sub(r".*@", "", content).strip("'").split(':')
            orchestrator_ip, orchestrator_port = \
                content[0], content[1] if len(content) > 1 else cfg.COUCHBASE_PORT
            # look up matching node in self nodes
            for node in self.nodes:
                if node.ip == orchestrator_ip and \
                        int(node.port) == int(orchestrator_port):
                    self.orchestrator = node
                    break
def getDirectMC(key, ip, port = 8091, bucket = "default", password = ""): real_mc_client = None # get initial mc client client = MemcachedClient(ip, int(port)) vbId = (((zlib.crc32(key)) >> 16) & 0x7fff) & (client.vbucket_count - 1) # get vbucket map rest = create_rest(ip, port) vbuckets = rest.get_vbuckets(bucket) # find vbucket responsible to this key and mapping host if vbuckets is not None: vbucket = [vbucket for vbucket in vbuckets if vbucket.id == vbId] if len(vbucket) == 1: mc_ip, mc_port = vbucket[0].master.split(":") real_mc_client = MemcachedClient(mc_ip, int(mc_port)) real_mc_client.sasl_auth_plain(bucket, password) return real_mc_client
def __init__(self, active_task_type="indexer", target_value="_design/ddoc1",
             addr=cfg.COUCHBASE_IP + ":" + cfg.COUCHBASE_PORT,
             username=cfg.COUCHBASE_USER, password=cfg.COUCHBASE_PWD):
    """Set up an active-task watcher for the given task type/target and
    build a REST client against the configured endpoint.

    :param active_task_type: task category to watch (see known_types)
    :param target_value: task target to match, e.g. a design doc name
    :param addr: "host:port" endpoint string
    :param username: REST username
    :param password: REST password
    """
    # flag stays False until construction fully completes
    self.initialized = False
    self.id = cfg.CB_CLUSTER_TAG + "active_task_status"
    self.ip, self.port = addr.split(":")
    self.username = username
    self.password = password
    self.type = active_task_type
    self.target_value = target_value
    self.rest = create_rest(self.ip, self.port, self.username, self.password)
    # tracking state for the watched task
    self.task_started = False
    self.empty_stat_count = 0
    # task categories this checker understands
    self.known_types = ["design_documents", "view_compaction",
                        "bucket_compaction", "indexer",
                        "initial_build", "original_target"]
    self.initialized = True
def getDirectMC(key, ip, port=8091, bucket="default", password=""):
    """Open a memcached connection straight to the node owning ``key``'s
    vbucket (SASL-authed to ``bucket``); returns None when the owning
    vbucket cannot be identified."""
    owner_client = None
    # bootstrap connection, consulted only for the cluster vbucket count
    bootstrap = MemcachedClient(ip, int(port))
    # crc32-based key hash folded into the vbucket space
    vb_id = ((zlib.crc32(key) >> 16) & 0x7fff) & (bootstrap.vbucket_count - 1)
    # pull the vbucket map over REST
    rest = create_rest(ip, port)
    vbuckets = rest.get_vbuckets(bucket)
    if vbuckets is not None:
        matches = [vb for vb in vbuckets if vb.id == vb_id]
        if len(matches) == 1:
            host, mc_port = matches[0].master.split(":")
            owner_client = MemcachedClient(host, int(mc_port))
            owner_client.sasl_auth_plain(bucket, password)
    return owner_client
def __init__(self, active_task_type="indexer", target_value="_design/ddoc1",
             addr=cfg.COUCHBASE_IP + ":" + cfg.COUCHBASE_PORT,
             username=cfg.COUCHBASE_USER, password=cfg.COUCHBASE_PWD):
    """Prepare a watcher for cluster active tasks of a given type/target
    and connect a REST client to the configured endpoint."""
    # mark as not-yet-constructed until the very end
    self.initialized = False
    # identity and watch configuration
    self.id = cfg.CB_CLUSTER_TAG + "active_task_status"
    self.type = active_task_type
    self.target_value = target_value
    # endpoint and credentials
    self.ip, self.port = addr.split(":")
    self.username = username
    self.password = password
    self.rest = create_rest(self.ip, self.port, self.username, self.password)
    # per-task tracking state
    self.task_started = False
    self.empty_stat_count = 0
    # recognized task categories
    self.known_types = ["design_documents", "view_compaction",
                        "bucket_compaction", "indexer",
                        "initial_build", "original_target"]
    self.initialized = True
def __init__(self, addr, bucket="default", username="******", password="******"):
    """Store the "host:port" target plus credentials and create a REST
    client against it.

    :param addr: "host:port" endpoint string (required)
    :param bucket: bucket name, defaults to "default"
    :param username: REST username
    :param password: REST password
    """
    # endpoint arrives as "host:port"
    self.ip, self.port = addr.split(":")
    self.username = username
    self.password = password
    self.bucket = bucket
    # REST handle bound to this endpoint
    self.rest = create_rest(self.ip, self.port, self.username, self.password)