def clean(self, clean_options):
    if self.clean_needed(clean_options):
        if self.servers:
            for server in self.servers:
                try:
                    LOG.info('Deleting instance %s...', server.name)
                    self.nova_client.servers.delete(server.id)
                except Exception:
                    LOG.exception("Instance %s deletion failed", server.name)
            LOG.info(' Waiting for %d instances to be fully deleted...',
                     len(self.servers))
            retry_count = 15 + len(self.servers) * 5
            while True:
                retry_count -= 1
                self.servers = [server for server in self.servers
                                if self.instance_exists(server)]
                if not self.servers:
                    break

                if retry_count:
                    LOG.info(' %d yet to be deleted by Nova, retries left=%d...',
                             len(self.servers), retry_count)
                    time.sleep(2)
                else:
                    LOG.warning(' instance deletion verification time-out: '
                                '%d still not deleted', len(self.servers))
                    break
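# The wait loop above gives Nova a time budget of roughly 30 seconds plus
# 10 seconds per instance (retry_count = 15 + 5 per server, 2-second sleep).
# A minimal standalone sketch of the same bounded-poll pattern; the
# wait_until_gone() name and still_pending() predicate are hypothetical,
# standing in for the instance_exists() check used above.
import time

def wait_until_gone(items, still_pending, base_retries=15, per_item_retries=5):
    retries = base_retries + len(items) * per_item_retries
    while retries:
        retries -= 1
        # keep only items that are still visible to the backend
        items = [item for item in items if still_pending(item)]
        if not items:
            break
        time.sleep(2)
    return items  # whatever is left was not deleted within the budget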
def clean(self, clean_options):
    if self.clean_needed(clean_options):
        if self.flavor:
            LOG.info("Deleting flavor %s...", self.flavor.name)
            try:
                self.flavor.delete()
            except Exception:
                LOG.exception("Flavor deletion failed")
def _collect_node_info_v1_1(node):
    svc = _read_svc_tmpl('svc_node.json', node.name)
    url = 'http://{}:12305/api/v1.1/machine'.format(node.name)
    try:
        resp, content = _post_dict(url, svc)
        LOG.debug('%s %s', resp, content)
        if resp.status != 200:
            LOG.warning('Post failed <{}>'.format(node.name))
            return
    except Exception as e:
        LOG.warning('Post failed <{}>'.format(e))
        LOG.exception(e)
        return
    node_json = json.loads(content)
    node_json['mem_request_used'] = node.mem_request_used
    push_node_status('nodes', node.name, node_json, version=1.1)
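# _post_dict is not shown here; a minimal sketch of what it might look like,
# assuming httplib2, whose Http().request() returns the (response, content)
# tuple unpacked by the caller and whose response exposes .status. The JSON
# encoding and timeout are assumptions, not the confirmed implementation.
import json
import httplib2

def _post_dict(url, payload):
    # POST the payload as a JSON body and return (response, content)
    http = httplib2.Http(timeout=10)
    return http.request(url, 'POST',
                        body=json.dumps(payload),
                        headers={'Content-Type': 'application/json'})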
def on_message(self, message):
    # LOG.debug(message)
    try:
        deserialized_message = Message.deserialize(message)
    except Exception:
        # ignore messages that cannot be deserialized
        return
    try:
        self.emitter.emit(deserialized_message.type, deserialized_message)
    except Exception as e:
        LOG.exception(e)
        traceback.print_exc(file=sys.stdout)

    for client in client_connections:
        client.write_message(message)
def clean(self, clean_options):
    if self.clean_needed(clean_options):
        for port in self.ports:
            LOG.info("Deleting port %s...", port['id'])
            try:
                self.neutron_client.delete_port(port['id'])
            except Exception:
                LOG.exception("Port deletion failed")
        # associated subnets are automatically deleted by neutron
        for net in self.networks:
            LOG.info("Deleting network %s...", net['name'])
            try:
                self.neutron_client.delete_network(net['id'])
            except Exception:
                LOG.exception("Network deletion failed")
def close(self):
    """Close this instance of chain runner and delete resources if applicable."""
    try:
        if not self.config.no_cleanup:
            LOG.info('Cleaning up...')
            if self.chain_manager:
                self.chain_manager.delete()
        else:
            LOG.info('Clean up skipped.')
        try:
            self.traffic_client.close()
        except Exception:
            LOG.exception('Traffic client close failed')
        if self.stats_manager:
            self.stats_manager.close()
    except Exception:
        LOG.exception('Cleanup not finished')
def run(self, host, port):
    # app.run will not return so we need to run it in a background thread so that
    # the calling thread (main thread) can keep doing work
    Thread(target=self.app.run, args=(host, port)).start()

    # wait for run requests
    # the runner must be executed from the main thread (Trex client library requirement)
    while True:
        # print 'main thread waiting for requests...'
        config = Ctx.dequeue()
        # print 'main thread processing request...'
        # print config
        try:
            # remove unfilled values as we do not want them to override
            # default values with None
            config = {k: v for k, v in config.items() if v is not None}
            with RunLock():
                if self.fluent_logger:
                    self.fluent_logger.start_new_run()
                results = self.nfvbench_runner.run(config, config)
        except Exception as exc:
            results = result_json(STATUS_ERROR, str(exc))
            LOG.exception('NFVbench runner exception:')

        # this might overwrite a previously unfetched result
        Ctx.set_result(results)
        try:
            summary = NFVBenchSummarizer(results['result'], self.fluent_logger)
            LOG.info(str(summary))
        except KeyError:
            # in case of error, 'result' might be missing
            if 'error_message' in results:
                LOG.error(results['error_message'])
            else:
                LOG.error('REST request completed without results or error message')
        Ctx.release()
        if self.fluent_logger:
            self.fluent_logger.send_run_summary(True)
def clean(self, clean_options):
    if self.clean_needed(clean_options):
        # associated routes need to be deleted before deleting the routers
        for rtr in self.routers:
            LOG.info("Deleting routes for %s...", rtr['name'])
            try:
                body = {'router': {'routes': []}}
                self.neutron_client.update_router(rtr['id'], body)
            except Exception:
                LOG.exception("Router routes deletion failed")
            LOG.info("Deleting ports for %s...", rtr['name'])
            try:
                for port in self.ports:
                    body = {'port_id': port['id']}
                    self.neutron_client.remove_interface_router(rtr['id'], body)
            except Exception:
                LOG.exception("Router ports deletion failed")
            LOG.info("Deleting router %s...", rtr['name'])
            try:
                self.neutron_client.delete_router(rtr['id'])
            except Exception:
                LOG.exception("Router deletion failed")
def run_client_dir(self, target_ip, mss,
                   reverse_dir=False,
                   bandwidth_kbps=0,
                   protocol='TCP',
                   length=0,
                   no_cpu_timed=0):
    '''Run client in one direction
    :param reverse_dir: True to reverse the direction (TCP only for now)
    :param bandwidth_kbps: transmit rate limit in Kbps
    :param protocol: (TCP|UDP|Multicast)
    :param length: length of network write|read buf (default 1K|8K/udp, 64K/tcp);
                   for UDP this is the packet size
    :param no_cpu_timed: if non zero, disables CPU collection and overrides
                   the duration with the provided value - used mainly for UDP
                   to quickly find the optimal throughput using short tests
                   at various throughput values
    :return: a list of 1 dictionary with the results (see parse_results())
    '''
    # run client using the default TCP window size (TCP window
    # scaling is normally enabled by default so setting an explicit window
    # size is not going to help achieve better results)
    opts = ''
    multicast = protocol == 'Multicast'
    tcp = protocol == 'TCP'
    udp = protocol == 'UDP'

    if mss:
        opts += "-M" + str(mss)
    if reverse_dir:
        opts += " -F -r"
    if length:
        opts += " -l" + str(length)
    if self.instance.config.ipv6_mode:
        opts += " -6 "
    if multicast:
        opts += " -m32 -o -j -g" + self.instance.config.multicast_addr
    if not tcp:
        opts += " -u"
        # for UDP, if the bandwidth is not provided we need to calculate
        # the optimal bandwidth
        if not bandwidth_kbps:
            udp_res = self.find_bdw(length, target_ip, protocol)
            if 'error' in udp_res:
                return [udp_res]
            if not self.instance.gmond_svr:
                # if we do not collect CPU we might as well return
                # the results found through iteration
                return [udp_res]
            bandwidth_kbps = udp_res['throughput_kbps']
    if bandwidth_kbps:
        opts += " -R%sK" % (bandwidth_kbps)
    if no_cpu_timed:
        duration_sec = no_cpu_timed
    else:
        duration_sec = self.instance.get_cmd_duration()

    # use data port 5001 and control port 5002
    # must be enabled in the VM security group
    cmd = "%s -a -T%d %s -p5001 -P5002 -fparse %s" % (self.dest_path,
                                                      duration_sec,
                                                      opts,
                                                      target_ip)
    self.instance.buginf(cmd)
    try:
        if no_cpu_timed:
            # force the timeout value with 20 seconds extra for the command to
            # complete and do not collect CPU
            cpu_load = None
            cmd_out = self.instance.exec_command(cmd, duration_sec + 20)
        else:
            (cmd_out, cpu_load) = self.instance.exec_with_cpu(cmd)
    except sshutils.SSHError as exc:
        # Timeout or any SSH error
        self.instance.display('SSH Error:' + str(exc))
        return [self.parse_error(protocol, str(exc))]

    try:
        if udp or multicast:
            # UDP output:
            # megabytes=1.1924 real_seconds=10.01 rate_Mbps=0.9997 tx_cpu=99 rx_cpu=0
            #     drop=0 pkt=1221 data_loss=0.00000
            re_udp = r'rate_Mbps=([\d\.]*) tx_cpu=\d* rx_cpu=\d* drop=(\-*\d*) pkt=(\d*)'
            if multicast:
                re_udp += r' data_loss=[\d\.]* msmaxjitter=([\d\.]*) msavgOWD=([\-\d\.]*)'
            match = re.search(re_udp, cmd_out)
            if match:
                rate_mbps = float(match.group(1))
                drop = float(match.group(2))
                pkt = int(match.group(3))
                jitter = None
                if multicast:
                    jitter = float(match.group(4))
                # Workaround for a nuttcp bug where drop is sometimes
                # reported as a negative number
                if drop < 0:
                    drop = 0
                return [self.parse_results(protocol,
                                           int(rate_mbps * 1024),
                                           lossrate=round(drop * 100 / pkt, 2),
                                           reverse_dir=reverse_dir,
                                           msg_size=length,
                                           cpu_load=cpu_load,
                                           jitter=jitter)]
        else:
            # TCP output:
            # megabytes=1083.4252 real_seconds=10.04 rate_Mbps=905.5953 tx_cpu=3 rx_cpu=19
            #     retrans=0 cwnd=3202 rtt_ms=0.55
            re_tcp = r'rate_Mbps=([\d\.]*) tx_cpu=\d* rx_cpu=\d*' \
                     r' retrans=(\d*) cwnd=\d* rtt_ms=([\d\.]*)'
            match = re.search(re_tcp, cmd_out)
            if match:
                rate_mbps = float(match.group(1))
                retrans = int(match.group(2))
                rtt_ms = float(match.group(3))
                return [self.parse_results(protocol,
                                           int(rate_mbps * 1024),
                                           retrans=retrans,
                                           rtt_ms=rtt_ms,
                                           reverse_dir=reverse_dir,
                                           msg_size=length,
                                           cpu_load=cpu_load)]
    except Exception as exc:
        LOG.exception(cmd_out)
        self.instance.display('Parsing Error:' + str(exc))
        return [self.parse_error(protocol,
                                 "cmd=%s: out=%s: exc=%s" % (cmd, cmd_out, str(exc)))]
    return [self.parse_error(protocol, 'Could not parse: %s' % (cmd_out))]
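# Quick standalone check (illustrative only) that the UDP regex above matches
# the sample nuttcp output quoted in the comment; the sample line is taken
# verbatim from that comment, not from a live run.
import re

sample = ('megabytes=1.1924 real_seconds=10.01 rate_Mbps=0.9997 '
          'tx_cpu=99 rx_cpu=0 drop=0 pkt=1221 data_loss=0.00000')
re_udp = r'rate_Mbps=([\d\.]*) tx_cpu=\d* rx_cpu=\d* drop=(\-*\d*) pkt=(\d*)'
match = re.search(re_udp, sample)
assert match
print(match.groups())  # ('0.9997', '0', '1221')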
def __range_search(self, left, right, targets, results):
    """Perform a binary search for a list of targets inside a [left..right] range of rates.

    left    the left side of the range to search, as a % of line rate
            (100 = 100% line rate) indicating the rate to send on each interface
    right   the right side of the range to search, as a % of line rate
            indicating the rate to send on each interface
    targets a dict of drop rates to search (0.1 = 0.1%), indexed by the DR name
            or "tag" ('ndr', 'pdr')
    results a dict to store results
    """
    if not targets:
        return
    LOG.info('Range search [%s .. %s] targets: %s', left, right, targets)

    # Terminate the search when the gap is less than the load epsilon
    if right - left < self.config.measurement.load_epsilon:
        self.__targets_found(left, targets, results)
        return

    # Obtain the average drop rate for the middle load
    middle = (left + right) / 2.0
    try:
        stats, rates = self.__run_search_iteration(middle)
    except STLError:
        LOG.exception("Got exception from traffic generator during binary search")
        self.__targets_found(left, targets, results)
        return

    # Split the target dicts based on the average drop rate
    left_targets = {}
    right_targets = {}
    for tag, target in targets.iteritems():
        if stats['overall']['drop_rate_percent'] <= target:
            # record the best possible rate found for this target
            results[tag] = rates
            results[tag].update({
                'load_percent_per_direction': middle,
                'stats': self.__format_output_stats(dict(stats)),
                'timestamp_sec': None
            })
            right_targets[tag] = target
        else:
            # initialize all result fields to 0 for the worst case scenario
            # of the binary search (if ndr/pdr is not found)
            if tag not in results:
                results[tag] = dict.fromkeys(rates, 0)
                empty_stats = self.__format_output_stats(dict(stats))
                for key in empty_stats:
                    if isinstance(empty_stats[key], dict):
                        empty_stats[key] = dict.fromkeys(empty_stats[key], 0)
                    else:
                        empty_stats[key] = 0
                results[tag].update({
                    'load_percent_per_direction': 0,
                    'stats': empty_stats,
                    'timestamp_sec': None
                })
            left_targets[tag] = target

    # search the lower half
    self.__range_search(left, middle, left_targets, results)

    # search the upper half only if the upper rate does not exceed 100%;
    # this only happens when the first search at 100% yields a DR that
    # is < target DR
    if middle >= 100:
        self.__targets_found(100, right_targets, results)
    else:
        self.__range_search(middle, right, right_targets, results)
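# An illustrative toy version of the same narrowing logic (not part of the
# module): a monotonic fake measurement stands in for a traffic-generator
# iteration, and 'epsilon' plays the role of config.measurement.load_epsilon.
def toy_range_search(left, right, target_drop, measure, epsilon=0.1):
    # measure(load) returns the observed drop rate (%) at that load (%)
    if right - left < epsilon:
        return left  # highest load found that still meets the target
    middle = (left + right) / 2.0
    if measure(middle) <= target_drop:
        # target met at middle load: keep searching the upper half
        return toy_range_search(middle, right, target_drop, measure, epsilon)
    # target missed at middle load: keep searching the lower half
    return toy_range_search(left, middle, target_drop, measure, epsilon)

# Example: drop rate rises sharply past 70% load; search for a 0.1% drop rate.
# The search converges to just above 70% within the epsilon resolution.
print(toy_range_search(0.0, 100.0, 0.1,
                       lambda load: 0.0 if load <= 70 else (load - 70) ** 2))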