def start(self, nodes, bucket, pnames, name, frequency, client_id='', collect_server_stats=True):
    """Initialize the collection task and spawn the stat-gathering threads.

    nodes                -- cluster nodes to sample; nodes[0] is used for
                            REST info, build and machine stats
    bucket               -- bucket whose stats are collected
    pnames               -- process names passed to system_stats
    name                 -- label stored in the task record
    frequency            -- sampling period (seconds) for most collectors
    client_id            -- optional identifier stored on the instance
    collect_server_stats -- when False, only build/machine stats are taken
    """
    self._task = {
        "state": "running", "threads": [], "name": name,
        "time": time.time(), "ops": [], "totalops": [],
        "ops-temp": [], "latency": {}, "data_size_stats": []
    }
    rest = RestConnection(nodes[0])
    info = rest.get_nodes_self()
    self.data_path = info.storage[0].get_data_path()
    self.client_id = str(client_id)
    if collect_server_stats:
        mbstats_thread = Thread(target=self.membase_stats,
                                args=(nodes, bucket, frequency, self._verbosity))
        mbstats_thread.start()
        sysstats_thread = Thread(target=self.system_stats,
                                 args=(nodes, pnames, frequency, self._verbosity))
        sysstats_thread.start()
        diskstats_thread = Thread(target=self.disk_stats,
                                  args=(nodes, frequency, self._verbosity))
        diskstats_thread.start()
        # ns_server stats are polled from a single node at a fixed 10s period.
        ns_server_stats_thread = Thread(target=self.ns_server_stats,
                                        args=([nodes[0]], bucket, 10, self._verbosity))
        ns_server_stats_thread.start()
        # Reuse the RestConnection created above instead of building a second one.
        bucket_size_thread = Thread(target=self.get_bucket_size,
                                    args=(bucket, rest, frequency))
        bucket_size_thread.start()
        # Track every started thread (the original omitted diskstats_thread,
        # leaving it untracked by whatever joins/aborts via this list).
        self._task["threads"] = [
            sysstats_thread,
            ns_server_stats_thread,
            bucket_size_thread,
            mbstats_thread,
            diskstats_thread,
        ]
    # Build/machine stats are one-shot samples, taken from the cluster once.
    self.build_stats(nodes)
    self.machine_stats(nodes)
def disk_stats(self, nodes, frequency, verbosity=False):
    """Sample disk state from each reachable node every `frequency` seconds
    until the task is aborted, then publish the snapshots on the task dict.

    Fix: keep (node, shell) pairs together. The original appended to
    `shells` only for nodes that connected successfully, but then indexed
    `nodes[i]` in lockstep — so if any node failed, every later snapshot
    was attributed to the wrong ip.
    """
    connections = []
    for node in nodes:
        try:
            bucket = RestConnection(node).get_buckets()[0].name
            # Connectivity probe: verifies memcached is reachable before we
            # commit to sampling this node (result intentionally discarded).
            MemcachedClientHelper.direct_client(node, bucket)
            connections.append((node, RemoteMachineShellConnection(node)))
        except Exception:
            # Best-effort: skip nodes we cannot reach.
            pass
    d = {"snapshots": []}
    # unique_id ties every snapshot back to this collection run.
    start_time = str(self._task["time"])
    while not self._aborted():
        time.sleep(frequency)
        for node, shell in connections:
            value = self._extrace_disk_states(shell)
            value["unique_id"] = node.ip + '-' + start_time
            value["time"] = time.time()
            value["ip"] = node.ip
            d["snapshots"].append(value)
    self._task["diskstats"] = d["snapshots"]
    self.log.info(" finished diskstats")
def _rest(self):
    """Build a RestConnection for servers[0], with the stored REST credentials.

    Uses `with` on the lock so it is released even if deepcopy raises —
    the original acquire/release pair would leak the lock on an exception.
    """
    with self.servers_lock:
        server_info = deepcopy(self.servers[0])
    server_info['username'] = self.rest_username
    server_info['password'] = self.rest_password
    return RestConnection(server_info)
def __init__(self, host=None, port=None, username=None, password=None):
    """Initialize memcache and couchbase clients.

    Each connection parameter falls back to the corresponding entry in
    settings.COUCHBASE when not supplied by the caller.
    """
    server = dict(
        ip=host or settings.COUCHBASE['HOST'],
        port=port or settings.COUCHBASE['PORT'],
        username=username or settings.COUCHBASE['USER'],
        password=password or settings.COUCHBASE['PASSWORD'],
    )
    self.rest_client = RestConnection(server)
    # Direct memcached connection on the standard memcached port.
    self.mc_client = memcache.Client([server['ip'] + ':11211'], debug=0)
def __init__(self, url, bucket, password="", verbose=False):
    """vBucket-aware memcached client: parses the REST url, initializes the
    vbucket map/connections, and starts the dispatcher and streaming threads.

    url      -- REST endpoint of the form http://<host>:<port>/pools/default
    bucket   -- bucket name (also used as the REST username)
    password -- REST password
    verbose  -- verbosity flag stored on the instance

    Raises InvalidArgumentException if url or bucket is empty.
    """
    self.log = logger.logger("VBucketAwareMemcachedClient")
    self.bucket = bucket
    # The bucket name doubles as the REST username.
    self.rest_username = bucket
    self.rest_password = password
    # host -> memcached connection cache.
    self._memcacheds = {}
    # vbucket maps, each guarded by its own lock.
    self._vBucketMap = {}
    self._vBucketMap_lock = Lock()
    self._vBucketMapFastForward = {}
    self._vBucketMapFastForward_lock = Lock()
    #TODO: use regular expressions to parse the url
    server = {}
    if not bucket:
        raise InvalidArgumentException("bucket can not be an empty string", parameters="bucket")
    if not url:
        raise InvalidArgumentException("url can not be an empty string", parameters="url")
    # Naive slice-based parsing: expects exactly http://<ip>:<port>/pools/default.
    # NOTE(review): any other url shape silently leaves `server` without
    # ip/port — presumably callers always pass this exact form; verify.
    if url.find("http://") != -1 and url.rfind(":") != -1 and url.find(
            "/pools/default") != -1:
        server["ip"] = url[url.find("http://") + len("http://"):url.rfind(":")]
        server["port"] = url[url.rfind(":") + 1:url.find("/pools/default")]
        server["username"] = self.rest_username
        server["password"] = self.rest_password
    self.servers = [server]
    self.servers_lock = Lock()
    # Order matters below: REST connection first, then vbucket map/connections,
    # then the worker threads that depend on them.
    self.rest = RestConnection(server)
    self.reconfig_vbucket_map()
    self.init_vbucket_connections()
    # Command dispatcher runs on its own daemon thread.
    self.dispatcher = CommandDispatcher(self)
    self.dispatcher_thread = Thread(name="dispatcher-thread",
                                    target=self._start_dispatcher)
    self.dispatcher_thread.daemon = True
    self.dispatcher_thread.start()
    # Daemon thread driving _start_streaming — presumably streams cluster
    # topology/vbucket-map updates; confirm against _start_streaming.
    self.streaming_thread = Thread(name="streaming",
                                   target=self._start_streaming, args=())
    self.streaming_thread.daemon = True
    self.streaming_thread.start()
    self.verbose = verbose
def system_stats(self, nodes, pnames, frequency, verbosity=False):
    """Sample per-process info for each name in `pnames` on each reachable
    node every `frequency` seconds until aborted, then publish snapshots.

    Fix: keep (node, shell) pairs together — the original indexed
    `nodes[i]` in parallel with `shells`, which mis-pairs node and shell
    whenever a node fails to connect and is skipped.
    """
    try:
        connections = []
        for node in nodes:
            try:
                bucket = RestConnection(node).get_buckets()[0].name
                # Connectivity probe before committing to this node.
                MemcachedClientHelper.direct_client(node, bucket)
                connections.append((node, RemoteMachineShellConnection(node)))
            except Exception:
                # Best-effort: skip unreachable nodes.
                pass
        d = {"snapshots": []}
        start_time = str(self._task["time"])
        while not self._aborted():
            time.sleep(frequency)
            current_time = time.time()
            for node, shell in connections:
                unique_id = node.ip + '-' + start_time
                for pname in pnames:
                    obj = RemoteMachineHelper(shell).is_process_running(pname)
                    if obj and obj.pid:
                        value = self._extract_proc_info(shell, obj.pid)
                        value["name"] = pname
                        value["id"] = obj.pid
                        value["unique_id"] = unique_id
                        value["time"] = current_time
                        value["ip"] = node.ip
                        d["snapshots"].append(value)
        self._task["systemstats"] = d["snapshots"]
        self.log.info(" finished system_stats")
    except Exception:
        # Preserve the original best-effort behavior on unexpected errors,
        # but no longer swallow SystemExit/KeyboardInterrupt.
        self._aborted()
def build_info(node):
    """Fetch 'nodes/self' from the node's REST API and return the parsed JSON."""
    conn = RestConnection(node)
    endpoint = conn.baseUrl + 'nodes/self'
    _, body = conn._http_request(endpoint)
    return json.loads(body)
def membase_stats(self, nodes, bucket, frequency, verbose=False):
    """Poll memcached stats (plus timings/dispatcher/tap/checkpoint groups)
    from each reachable node every `frequency` seconds until aborted, then
    tag every snapshot with unique_id/time/ip and publish on the task dict.

    Fixes: removed a leftover Python-2 debug statement
    (`print len(d[mc.host]["checkpoint"])`); the five identical
    collect/tag loops are folded into a single loop over the stat groups.
    """
    try:
        mcs = []
        for node in nodes:
            try:
                # NOTE: intentionally shadows the `bucket` argument with each
                # node's first bucket, matching the original behavior.
                bucket = RestConnection(node).get_buckets()[0].name
                mcs.append(MemcachedClientHelper.direct_client(node, bucket))
            except Exception:
                # Best-effort: skip unreachable nodes.
                pass
        self._task["membasestats"] = []
        stat_groups = ("timings", "dispatcher", "tap", "checkpoint")
        for group in stat_groups:
            self._task[group] = []
        # Per-host staging buffers; tagged and published after the loop ends.
        d = {}
        for mc in mcs:
            d[mc.host] = {"snapshots": []}
            for group in stat_groups:
                d[mc.host][group] = []
        while not self._aborted():
            time_left = frequency
            # At minimum check for abort once a minute while waiting.
            while not self._aborted() and time_left > 0:
                time.sleep(min(time_left, 60))
                time_left = max(time_left - 60, 0)
            for mc in mcs:
                stats = mc.stats()
                stats["time"] = time.time()
                stats["ip"] = mc.host
                d[mc.host]["snapshots"].append(stats)
                for group in stat_groups:
                    d[mc.host][group].append(mc.stats(group))
        start_time = str(self._task["time"])
        for mc in mcs:
            ip = mc.host
            unique_id = ip + '-' + start_time
            current_time = time.time()
            for snapshot in d[ip]["snapshots"]:
                snapshot['unique_id'] = unique_id
                snapshot['time'] = current_time
                snapshot['ip'] = ip
                self._task["membasestats"].append(snapshot)
            for group in stat_groups:
                for entry in d[ip][group]:
                    entry['unique_id'] = unique_id
                    entry['time'] = current_time
                    entry['ip'] = ip
                    self._task[group].append(entry)
        self.log.info(" finished membase_stats")
    except Exception:
        # Preserve original best-effort behavior on unexpected errors,
        # but no longer swallow SystemExit/KeyboardInterrupt.
        self._aborted()