def get_controller_info(ssh_access, net, res_col, retry_count):
    """Collect deployment metadata from the OpenStack controller node.

    SSH into the controller and record the Linux distro, the OpenStack
    release, and CPU details into the results collector. When a network
    config is supplied, also record the NIC name and L2 agent version.

    :param ssh_access: ssh access descriptor for the controller (no-op if falsy)
    :param net: network configuration object (may be None)
    :param res_col: results collector receiving the gathered properties
    :param retry_count: number of SSH connection retries
    """
    if not ssh_access:
        # Controller access not configured for this run; nothing to fetch.
        return
    LOG.info('Fetching OpenStack deployment details...')
    conn = sshutils.SSH(ssh_access, connect_retry_count=retry_count)
    if conn is None:
        LOG.error('Cannot connect to the controller node')
        return
    props = {
        'distro': conn.get_host_os_version(),
        'openstack_version': conn.check_openstack_version(),
        'cpu_info': conn.get_cpu_info(),
    }
    if net:
        agent_type = res_col.get_result('l2agent_type')
        encap = res_col.get_result('encapsulation')
        if agent_type:
            if encap:
                props['nic_name'] = conn.get_nic_name(agent_type, encap,
                                                      net.internal_iface_dict)
            props['l2agent_version'] = conn.get_l2agent_version(agent_type)
    # Echo the collected metadata to both console and file logs.
    CONLOG.info(res_col.ppr.pformat(props))
    FILELOG.info(json.dumps(props, sort_keys=True))
    res_col.add_properties(props)
def ext_host_tp_test(self):
    """Run the external-host throughput test (upload/download).

    Wraps the configured external host in a PerfInstance so the same
    client-side tooling can drive it, then runs a bidirectional flow
    between it and the test server VM. Results (if any) are added to the
    results collector and logged.
    """
    client = PerfInstance('Host-' + self.config.ext_host.host + '-Client', self.config)
    if not client.setup_ssh(self.config.ext_host):
        client.display('SSH to ext host failed, check IP or make sure public key is configured')
    else:
        client.buginf('SSH connected')
        client.create()
        FlowPrinter.print_desc('External-VM (upload/download)')
        target_ip = self.server.ssh_access.host
        if self.config.same_network_only:
            # No external/floating address in this mode; use the server's
            # fixed (internal) IP instead.
            target_ip = self.server.internal_ip
        res = client.run_client('External-VM', target_ip, self.server,
                                bandwidth=self.config.vm_bandwidth,
                                bidirectional=True)
        # run_client may return nothing on failure; only record real results.
        if res:
            self.rescol.add_flow_result(res)
            CONLOG.info(self.rescol.ppr.pformat(res))
            FILELOG.info(json.dumps(res, sort_keys=True))
    # Always release the client instance, even when SSH setup failed.
    client.dispose()
def measure_flow(self, label, target_ip):
    """Run one traffic flow toward target_ip and record its results.

    Honors two early-exit config switches: keep_first_flow_and_exit
    (exit 0 after logging the first flow) and stop_on_error (exit 2 as
    soon as any per-protocol result contains an error).
    """
    label = self.add_location(label)
    FlowPrinter.print_desc(label)
    # Results for this flow come back as a single dict.
    flow_res = self.client.run_client(label, target_ip, self.server,
                                      bandwidth=self.config.vm_bandwidth,
                                      az_to=self.server.az)
    if self.config.keep_first_flow_and_exit:
        CONLOG.info(self.rescol.ppr.pformat(flow_res))
        FILELOG.info(json.dumps(flow_res, sort_keys=True))
        LOG.info('Stopping execution after first flow, cleanup all VMs/networks manually')
        sys.exit(0)
    if self.config.stop_on_error:
        # Abort the whole run if any individual result reports an error.
        if any('error' in entry for entry in flow_res['results']):
            LOG.error('Stopping execution on error, cleanup all VMs/networks manually')
            CONLOG.info(self.rescol.ppr.pformat(flow_res))
            FILELOG.info(json.dumps(flow_res, sort_keys=True))
            sys.exit(2)
    self.rescol.add_flow_result(flow_res)
    CONLOG.info(self.rescol.ppr.pformat(flow_res))
    FILELOG.info(json.dumps(flow_res, sort_keys=True))
def print_report(results):
    """Build and log a pass/fail/skip summary table of all measured flows.

    :param results: full results dict (as produced by the collector);
                    only the 'flows' list is consulted here.
    """
    # In order to parse the results with less logic, we are encoding the results as below:
    # Same Network = 0, Different Network = 1
    # Fixed IP = 0, Floating IP = 1
    # Intra-node = 0, Inter-node = 1
    # Additionally idx0 == 2 is reserved for the External-VM (upload/download)
    # results and idx0 == 3 for the native-host throughput results.
    SPASS = "******"
    SFAIL = "\033[91mFAILED\033[0m"   # ANSI red "FAILED"
    # Initilize a run_status[4][2][2][4] array
    # (last dimension indexes protocol: TCP, UDP, ICMP, Multicast)
    run_status = [([([(["SKIPPED"] * 4) for i in range(2)]) for i in range(2)]) for i in range(4)]
    # NOTE(review): [{}] * 4 makes the four cells share one dict object;
    # harmless here because cells are only rebound (never mutated), but fragile.
    run_data = [([([([{}] * 4) for i in range(2)]) for i in range(2)]) for i in range(4)]
    flows = results['flows']
    for flow in flows:
        res = flow['results']
        if flow['desc'].find('External-VM') != -1:
            # External-VM flow: a result without a 'direction' key is the
            # upload leg, one with it is the download leg.
            for item in res:
                if 'direction' not in item:
                    run_status[2][0][0][0] = SPASS if 'error' not in item else SFAIL
                    if run_status[2][0][0][0] == SPASS:
                        run_data[2][0][0][0] = gen_report_data('Upload', res)
                else:
                    run_status[2][0][0][1] = SPASS if 'error' not in item else SFAIL
                    if run_status[2][0][0][1] == SPASS:
                        run_data[2][0][0][1] = gen_report_data('Download', res)
            continue
        # Decode the scenario coordinates from the flow description text.
        idx0 = 0 if flow['desc'].find('same network') != -1 else 1
        idx1 = 0 if flow['desc'].find('fixed IP') != -1 else 1
        idx2 = 0 if flow['desc'].find('intra-node') != -1 else 1
        if flow['desc'].find('Native') != -1:
            idx0 = 3
            idx1 = idx2 = 0
        for item in res:
            for idx3, proto in enumerate(['TCP', 'UDP', 'ICMP', 'Multicast']):
                # Once a protocol slot is FAILED it stays FAILED (sticky failure).
                if (item['protocol'] == proto) and (run_status[idx0][idx1][idx2][idx3] != SFAIL):
                    if 'error' in item:
                        run_status[idx0][idx1][idx2][idx3] = SFAIL
                    else:
                        run_status[idx0][idx1][idx2][idx3] = SPASS
                        run_data[idx0][idx1][idx2][idx3] = gen_report_data(proto, res)
    table = []
    scenario = 0
    for idx0, net in enumerate(['Same Network', 'Different Network']):
        for idx1, ip in enumerate(['Fixed IP', 'Floating IP']):
            # Floating IP is meaningless on the same network; skip that combo.
            if net == 'Same Network' and ip == 'Floating IP':
                continue
            for idx2, node in enumerate(['Intra-node', 'Inter-node']):
                for idx3, proto in enumerate(['TCP', 'UDP', 'ICMP', 'Multicast']):
                    # NOTE(review): integer division ('scenario / 4') — this is
                    # Python 2 code; under Python 3 this would produce floats.
                    row = [str(scenario / 4 + 1) + "." + str(idx3 + 1),
                           "%s, %s, %s, %s" % (net, ip, node, proto),
                           run_status[idx0][idx1][idx2][idx3],
                           run_data[idx0][idx1][idx2][idx3]]
                    table.append(row)
                    scenario = scenario + 1
    for idx3, proto in enumerate(['TCP', 'UDP', 'ICMP', 'Multicast']):
        row = [str(scenario / 4 + 1) + "." + str(idx3 + 1),
               "Native Throughput, %s" % (proto),
               run_status[3][0][0][idx3],
               run_data[3][0][0][idx3]]
        table.append(row)
        scenario = scenario + 1
    table.append(['8.1', 'VM to Host Uploading', run_status[2][0][0][0], run_data[2][0][0][0]])
    table.append(['8.2', 'VM to Host Downloading', run_status[2][0][0][1], run_data[2][0][0][1]])
    # NOTE(review): table has no header row, yet 'table[1:]' and
    # 'len(table) - 1' both drop the first DATA row (scenario 1.1) from the
    # pass/fail counts — looks like an off-by-one; confirm intent.
    # Also: zip(...)[2] requires Python 2 (zip returns an iterator in py3).
    ptable = zip(*table[1:])[2]
    cnt_passed = ptable.count(SPASS)
    cnt_failed = ptable.count(SFAIL)
    cnt_skipped = ptable.count("SKIPPED")
    cnt_valid = len(table) - 1 - cnt_skipped
    # Guard against division by zero when every scenario was skipped.
    passed_rate = float(cnt_passed) / cnt_valid * 100 if cnt_valid != 0 else 0
    failed_rate = float(cnt_failed) / cnt_valid * 100 if cnt_valid != 0 else 0
    ptable = PrettyTable(['Scenario', 'Scenario Name', 'Functional Status', 'Data'])
    ptable.align = "l"
    ptable.max_width = 80
    for row in table:
        ptable.add_row(row)
    summary = "Summary of results\n"
    summary += "==================\n"
    summary += "Total Scenarios: %d\n" % (len(table) - 1)
    summary += "Passed Scenarios: %d [%.2f%%]\n" % (cnt_passed, passed_rate)
    summary += "Failed Scenarios: %d [%.2f%%]\n" % (cnt_failed, failed_rate)
    summary += "Skipped Scenarios: %d\n" % (cnt_skipped)
    summary += str(ptable)
    # Human-readable summary to the console log, machine-readable to the file log.
    CONLOG.info(summary)
    ls_summary = {"Result": results,
                  "Total Scenarios": (len(table) - 1),
                  "Passed Scenarios": "%d [%.2f%%]" % (cnt_passed, passed_rate),
                  "Failed Scenarios": "%d [%.2f%%]" % (cnt_failed, failed_rate),
                  "Skipped Scenarios": "%d" % (cnt_skipped)}
    FILELOG.info(json.dumps(ls_summary, sort_keys=True))
def print_desc(desc):
    """Log a separator banner followed by the numbered flow description."""
    global flow_num
    flow_num += 1
    banner = "=" * 60
    CONLOG.info(banner)
    LOG.info('Flow %d: %s', flow_num, desc)
def run_vmtp(opts):
    '''Run VMTP
    :param opts: Parameters that to be passed to VMTP in type argparse.Namespace().
                 See: http://vmtp.readthedocs.org/en/latest/usage.html#running-vmtp-as-a-library
                 for examples of the usage on this API.
    :return: A dictionary which contains the results in details.
    '''
    if (sys.argv == ['']):
        # Running from a Python call
        # Fill any options missing from the caller-supplied namespace with
        # the CLI defaults so downstream code sees a complete option set.
        # NOTE(review): iteritems() marks this as Python 2 code.
        def_opts = parse_opts_from_cli()
        for key, value in vars(def_opts).iteritems():
            if key not in opts:
                opts.__setattr__(key, value)
    config = merge_opts_to_configs(opts)
    rescol = ResultsCollector()
    # Run the native host tests if specified by user
    if opts.hosts:
        # A list of 0 to 2 HostSshAccess elements
        # remove any duplicate
        opts.hosts = list(set(opts.hosts))
        native_hosts = []
        if_name = None
        for host in opts.hosts:
            # decode and extract the trailing if name first
            # there is an if name if there are at least 2 ':' in the argument
            # e.g. "[email protected]:secret:eth0"
            if host.count(':') >= 2:
                last_column_index = host.rfind(':')
                # can be empty
                last_arg = host[last_column_index + 1:]
                # Only the first non-empty interface name encountered is kept.
                if not if_name and last_arg:
                    if_name = last_arg
                host = host[:last_column_index]
            native_hosts.append(get_ssh_access('host', host, config))
        native_tp_results = test_native_tp(native_hosts, if_name, config)
    else:
        native_tp_results = []
    # Record (and log) each native-throughput flow result.
    for item in native_tp_results:
        rescol.add_flow_result(item)
        CONLOG.info(rescol.ppr.pformat(item))
        FILELOG.info(json.dumps(item, sort_keys=True))
    # Parse the credentials of the OpenStack cloud, and run the benchmarking
    cred = credentials.Credentials(opts.rc, opts.passwd, opts.no_env)
    if cred.rc_auth_url:
        if config.debug:
            LOG.info('Using ' + cred.rc_auth_url)
        vmtp_instance = VmtpTest(config, cred, rescol)
        vmtp_instance.run()
        vmtp_net = vmtp_instance.net
        # Retrieve controller information if requested
        # controller node ssh access to collect metadata for the run.
        ctrl_host_access = get_ssh_access('controller-node', opts.controller_node, config)
        get_controller_info(ctrl_host_access, vmtp_net, rescol, config.ssh_retry_count)
    # Print the report
    print_report(rescol.results)
    # Post-processing of the results, adding some metadata
    if cred.rc_auth_url:
        rescol.add_property('auth_url', cred.rc_auth_url)
        rescol.mask_credentials()
    rescol.generate_runid()
    if opts.test_description:
        rescol.add_property('test_description', opts.test_description)
    # Save results to a JSON file
    if config.json_file:
        rescol.save(config)
    # Save results to MongoDB
    if config.vmtp_mongod_ip:
        rescol.save_to_db(config)
    return rescol.results
def save(self, cfg):
    '''Save results in json format file.'''
    CONLOG.info('Saving results in json file: ' + cfg.json_file + "...")
    # Write the accumulated results dict, pretty-printed and key-sorted
    # so successive runs diff cleanly.
    out_path = cfg.json_file
    with open(out_path, 'w') as out_file:
        json.dump(self.results, out_file, sort_keys=True, indent=4)