def measure_indexing_throughput(self, nodes):
    """Sample indexer progress on *nodes* and record aggregate throughput.

    Every 15 seconds, pulls the active 'indexer' tasks from each node,
    computes each indexer's rate of change since the previous sample and
    appends the summed throughput (with a timestamp) to
    self._task['indexer_info'].  Runs until self._aborted() is True.
    """
    self._task['indexer_info'] = list()
    indexers = defaultdict(dict)
    while not self._aborted():
        time.sleep(15)  # sampling interval, 15 seconds by default

        # Collect 'indexer' tasks from every node in the cluster.
        tasks = []
        for node in nodes:
            rest = RestConnection(node)
            tasks.extend(t for t in rest.active_tasks()
                         if t['type'] == 'indexer')

        # Sum per-indexer throughput; PIDs can be reused, so the unique
        # id combines the PID with the task's start time.
        thr = 0
        for task in tasks:
            uiid = task['pid'] + str(task['started_on'])
            prev = indexers[uiid]
            changes_delta = task['changes_done'] - prev.get('changes_done', 0)
            time_delta = task['updated_on'] - prev.get('updated_on',
                                                       task['started_on'])
            if time_delta:
                thr += changes_delta / time_delta
            prev['changes_done'] = task['changes_done']
            prev['updated_on'] = task['updated_on']

        # Record this throughput sample.
        self._task['indexer_info'].append({
            'indexing_throughput': thr,
            'timestamp': time.time(),
        })
def measure_indexing_throughput(self, nodes):
    """Track cumulative indexing throughput across *nodes*.

    Polls the active-tasks REST endpoint of each node every 15 seconds,
    derives the per-indexer change rate between consecutive samples and
    stores the total under self._task['indexer_info'].  The loop ends
    when self._aborted() reports True.
    """
    self._task['indexer_info'] = list()
    previous = defaultdict(dict)
    while not self._aborted():
        time.sleep(15)  # default sampling period

        # Gather indexer tasks from all nodes.
        indexer_tasks = []
        for node in nodes:
            conn = RestConnection(node)
            indexer_tasks.extend(
                [t for t in conn.active_tasks() if t['type'] == 'indexer'])

        throughput = 0
        for task in indexer_tasks:
            # PID alone is not unique over time; append the start time.
            key = task['pid'] + str(task['started_on'])
            done_before = previous[key].get('changes_done', 0)
            seen_at = previous[key].get('updated_on', task['started_on'])
            elapsed = task['updated_on'] - seen_at
            if elapsed:
                throughput += (task['changes_done'] - done_before) / elapsed
            previous[key]['changes_done'] = task['changes_done']
            previous[key]['updated_on'] = task['updated_on']

        # Append the aggregated sample with its wall-clock timestamp.
        self._task['indexer_info'].append({
            'indexing_throughput': throughput,
            'timestamp': time.time(),
        })
def perform_cb_collect(_input, log_path=None):
    """Run cb_collect_info on every server and copy the ZIPs to the client.

    For each node in ``_input.servers``: triggers log collection over REST,
    polls the active-tasks endpoint until the collection reaches a terminal
    state, then (on success) copies the resulting ZIP file to *log_path*
    and removes it from the node.

    :param _input: test input object exposing a ``servers`` list
    :param log_path: local directory the collected ZIP files are copied to
    """
    import logger
    log = logger.Logger.get_logger()
    for node in _input.servers:
        params = dict()
        if len(_input.servers) != 1:
            params['nodes'] = 'ns_1@' + node.ip
        else:
            # In case of single node we have to pass ip as below
            params['nodes'] = 'ns_1@' + '127.0.0.1'

        log.info('Collecting log on node ' + node.ip)
        rest = RestConnection(node)
        status, _, _ = rest.perform_cb_collect(params)
        time.sleep(
            10
        )  # This is needed as it takes a few seconds before the collection start
        log.info('CB collect status on %s is %s' % (node.ip, status))

        log.info('Polling active task endpoint to check CB collect status')
        if status is True:
            cb_collect_response = {}
            while True:
                content = rest.active_tasks()
                for response in content:
                    if response['type'] == 'clusterLogsCollection':
                        cb_collect_response = response
                        break
                # Use .get(): on the first poll the collection task may not
                # be visible yet, leaving cb_collect_response empty — the
                # original code raised KeyError here.
                collect_status = cb_collect_response.get('status')
                if collect_status == 'completed':
                    log.info(cb_collect_response)
                    break
                if collect_status in ('cancelled', 'failed'):
                    # Terminal failure: bail out instead of polling forever
                    # on a collection that will never complete.
                    log.info(cb_collect_response)
                    break
                time.sleep(
                    10
                )  # CB collect in progress, wait for 10 seconds and check progress again

            # Only copy the ZIP when the collection actually completed;
            # 'perNode'/'path' are not available otherwise.
            if cb_collect_response.get('status') == 'completed':
                log.info('Copy CB collect ZIP file to Client')
                remote_client = RemoteMachineShellConnection(node)
                cb_collect_path = cb_collect_response['perNode'][
                    params['nodes']]['path']
                zip_file_copied = remote_client.get_file(
                    os.path.dirname(cb_collect_path),
                    os.path.basename(cb_collect_path), log_path)
                log.info('%s node cb collect zip coped on client : %s' %
                         (node.ip, zip_file_copied))
                if zip_file_copied:
                    remote_client.execute_command("rm -f %s" %
                                                  cb_collect_path)
                # Always release the SSH connection, even when the copy
                # failed (the original leaked it in that case).
                remote_client.disconnect()