def __init__(self, grpc_port: str, grpc_root_server_port: str, signal_port: str, cmd: List[str], label: str,
             type_identifier: str, name: str, verbose_mode: bool):
    """Base GD device, common traits for both device based and host only GD cert tests

    :param grpc_port: main gRPC service port
    :param grpc_root_server_port: gRPC root server port
    :param signal_port: signaling port for backing process start up
    :param cmd: list of arguments to run in backing process
    :param label: device label used in logs
    :param type_identifier: device type identifier used in logs
    :param name: name of device used in logs
    """
    # Must be at the first line of __init__ method
    values = locals()
    # Gather every constructor argument except verbose_mode so they can all
    # be truthiness-checked in one pass below.
    arguments = [
        values[arg]
        for arg in inspect.getfullargspec(GdDeviceBase.__init__).args
        if arg != "verbose_mode"
    ]
    asserts.assert_true(
        all(arguments),
        "All arguments to GdDeviceBase must not be None nor empty")
    asserts.assert_true(
        all(cmd), "cmd list should not have None nor empty component")
    self.verbose_mode = verbose_mode
    self.grpc_root_server_port = int(grpc_root_server_port)
    self.grpc_port = int(grpc_port)
    self.signal_port = int(signal_port)
    self.name = name
    self.type_identifier = type_identifier
    self.label = label
    # logging.log_path only exists when this is used in an ACTS test run.
    self.log_path_base = get_current_context().get_full_output_path()
    self.test_runner_base_path = \
        get_current_context().get_base_output_path()
    self.backing_process_log_path = os.path.join(
        self.log_path_base,
        '%s_%s_backing_logs.txt' % (self.type_identifier, self.label))
    # Append log-destination flags only when the caller did not already
    # provide them in cmd.
    if "--btsnoop=" not in " ".join(cmd):
        cmd.append("--btsnoop=%s" % os.path.join(
            self.log_path_base, '%s_btsnoop_hci.log' % self.label))
    if "--btsnooz=" not in " ".join(cmd):
        cmd.append("--btsnooz=%s" % os.path.join(
            self.log_path_base, '%s_btsnooz_hci.log' % self.label))
    if "--btconfig=" not in " ".join(cmd):
        cmd.append("--btconfig=%s" % os.path.join(
            self.log_path_base, '%s_bt_config.conf' % self.label))
    self.cmd = cmd
    self.environment = os.environ.copy()
    # Color-code terminal output: "cert" devices blue, others yellow.
    if "cert" in self.label:
        self.terminal_color = TerminalColor.BLUE
    else:
        self.terminal_color = TerminalColor.YELLOW
def teardown_class(self):
    """Stops the router advertisement daemon and writes the RvR summary.

    Summary generation is best-effort: any failure is logged at info
    level rather than failing teardown.
    """
    if self.router_adv_daemon:
        self.router_adv_daemon.stop()
    try:
        output_path = context.get_current_context().get_base_output_path()
        test_class_name = context.get_current_context().test_class_name
        # Fixed typo in the user-visible page title ('Sumamry' -> 'Summary').
        output_file(f'{output_path}/{test_class_name}/rvr_summary.html',
                    title='RvR Summary')
        save(list(self.rvr_graph_summary))
    except Exception as e:
        self.log.info('Unable to generate RvR summary file due '
                      'to Exception')
        self.log.info(e)
def test_update_test_class_context_for_test_class_begin(self):
    """A TestClassBeginEvent must push a TestClassContext onto the stack."""
    begin_event = Mock(spec=TestClassBeginEvent)
    begin_event.test_class = Mock()

    _update_test_class_context(begin_event)

    self.assertIsInstance(get_current_context(), TestClassContext)
    reset_context()
def setup_class(self):
    """Optionally starts a root-canal emulator and registers GD devices."""
    # Output directory for this run; root-canal logs are written here.
    log_path_base = context.get_current_context().get_full_output_path()
    gd_devices = self.controller_configs.get("GdDevice")
    self.rootcanal_running = False
    if 'rootcanal' in self.controller_configs:
        self.rootcanal_running = True
        rootcanal_logpath = os.path.join(log_path_base,
                                         'rootcanal_logs.txt')
        # NOTE(review): handle is kept open for the class lifetime;
        # presumably closed in teardown — confirm.
        self.rootcanal_logs = open(rootcanal_logpath, 'w')
        rootcanal_config = self.controller_configs['rootcanal']
        rootcanal_hci_port = str(rootcanal_config.get("hci_port", "6402"))
        android_host_out = os.environ.get('ANDROID_HOST_OUT')
        rootcanal = android_host_out + "/nativetest64/root-canal/root-canal"
        # Launch root-canal with its test, HCI and link-layer ports;
        # stdout/stderr go to the log file opened above.
        self.rootcanal_process = subprocess.Popen(
            [
                rootcanal,
                str(rootcanal_config.get("test_port", "6401")),
                rootcanal_hci_port,
                str(rootcanal_config.get("link_layer_port", "6403"))
            ],
            cwd=ANDROID_BUILD_TOP,
            env=os.environ.copy(),
            stdout=self.rootcanal_logs,
            stderr=self.rootcanal_logs)
        # Point every GD device at the emulator's HCI port.
        for gd_device in gd_devices:
            gd_device["rootcanal_port"] = rootcanal_hci_port
    self.register_controller(importlib.import_module('cert.gd_device'),
                             builtin=True)
    # Device index 1 is the DUT, index 0 the cert device.
    self.device_under_test = self.gd_devices[1]
    self.cert_device = self.gd_devices[0]
def process_traffic_continuity_results(self, testcase_params, result):
    """Processes traffic results from a roaming test.

    Detects roam events, then searches the ping or iperf trace for traffic
    gaps, plots the outcome, and serializes the result to JSON.

    Args:
        testcase_params: dict containing all test results and meta data
        result: dict containing the traffic test results
    """
    self.detect_roam_events(result)
    output_dir = context.get_current_context().get_full_output_path()
    test_name = self.current_test_name
    plot_path = os.path.join(output_dir, test_name + '.html')
    if 'ping' in test_name:
        self.detect_ping_gaps(result)
        self.plot_ping_result(testcase_params,
                              result,
                              output_file_path=plot_path)
    elif 'iperf' in test_name:
        self.detect_iperf_gaps(result)
        self.plot_iperf_result(testcase_params,
                               result,
                               output_file_path=plot_path)
    json_path = os.path.join(output_dir, test_name + '.json')
    with open(json_path, 'w') as json_file:
        json.dump(wputils.serialize_dict(result), json_file, indent=4)
def process_testclass_results(self):
    """Saves all test results to enable comparison."""
    testclass_data = collections.OrderedDict()
    # Group per-test RSSI statistics by channel, keeping the orientation
    # list parallel to the three RSSI series so they plot together.
    for test_result in self.testclass_results:
        current_params = test_result['testcase_params']
        channel = current_params['channel']
        channel_data = testclass_data.setdefault(
            channel,
            collections.OrderedDict(orientation=[],
                                    rssi=collections.OrderedDict(
                                        signal_poll_rssi=[],
                                        chain_0_rssi=[],
                                        chain_1_rssi=[])))
        channel_data['orientation'].append(current_params['orientation'])
        channel_data['rssi']['signal_poll_rssi'].append(
            test_result['postprocessed_results']['signal_poll_rssi']
            ['mean'][0])
        channel_data['rssi']['chain_0_rssi'].append(
            test_result['postprocessed_results']['chain_0_rssi']['mean']
            [0])
        channel_data['rssi']['chain_1_rssi'].append(
            test_result['postprocessed_results']['chain_1_rssi']['mean']
            [0])
    # Publish test class metrics
    for channel, channel_data in testclass_data.items():
        for rssi_metric, rssi_metric_value in channel_data['rssi'].items():
            metric_name = 'ota_summary_ch{}.avg_{}'.format(
                channel, rssi_metric)
            metric_value = numpy.mean(rssi_metric_value)
            self.testclass_metric_logger.add_metric(
                metric_name, metric_value)
    # Plot test class results
    chamber_mode = self.testclass_results[0]['testcase_params'][
        'chamber_mode']
    if chamber_mode == 'orientation':
        x_label = 'Angle (deg)'
    elif chamber_mode == 'stepped stirrers':
        x_label = 'Position Index'
    elif chamber_mode == 'StirrersOn':
        # No per-position axis is meaningful in this mode; skip plotting.
        return
    plots = []
    for channel, channel_data in testclass_data.items():
        current_plot = wputils.BokehFigure(
            title='Channel {} - Rssi vs. Position'.format(channel),
            x_label=x_label,
            primary_y_label='RSSI (dBm)',
        )
        for rssi_metric, rssi_metric_value in channel_data['rssi'].items():
            legend = rssi_metric
            current_plot.add_line(channel_data['orientation'],
                                  rssi_metric_value, legend)
        current_plot.generate_figure()
        plots.append(current_plot)
    current_context = context.get_current_context().get_full_output_path()
    plot_file_path = os.path.join(current_context, 'results.html')
    wputils.BokehFigure.save_figures(plots, plot_file_path)
def setup_class(self, dut_module, cert_module):
    """Sets up DUT/cert modules and, when configured, a root-canal emulator."""
    self.dut_module = dut_module
    self.cert_module = cert_module
    self.log_path_base = get_current_context().get_full_output_path()
    # Start root-canal if needed
    self.rootcanal_running = False
    if 'rootcanal' in self.controller_configs:
        self.rootcanal_running = True
        # Get root canal binary
        rootcanal = os.path.join(get_gd_root(), "root-canal")
        asserts.assert_true(
            os.path.isfile(rootcanal),
            "Root canal does not exist at %s" % rootcanal)
        # Get root canal log
        self.rootcanal_logpath = os.path.join(self.log_path_base,
                                              'rootcanal_logs.txt')
        self.rootcanal_logs = open(self.rootcanal_logpath, 'w')
        # Make sure ports are available
        rootcanal_config = self.controller_configs['rootcanal']
        rootcanal_test_port = int(rootcanal_config.get("test_port", "6401"))
        rootcanal_hci_port = int(rootcanal_config.get("hci_port", "6402"))
        rootcanal_link_layer_port = int(
            rootcanal_config.get("link_layer_port", "6403"))
        asserts.assert_true(
            make_ports_available((rootcanal_test_port, rootcanal_hci_port,
                                  rootcanal_link_layer_port)),
            "Failed to make root canal ports available")
        # Start root canal process
        self.rootcanal_process = subprocess.Popen(
            [
                rootcanal,
                str(rootcanal_test_port),
                str(rootcanal_hci_port),
                str(rootcanal_link_layer_port)
            ],
            cwd=get_gd_root(),
            env=os.environ.copy(),
            stdout=self.rootcanal_logs,
            stderr=self.rootcanal_logs)
        asserts.assert_true(
            self.rootcanal_process,
            msg="Cannot start root-canal at " + str(rootcanal))
        asserts.assert_true(
            is_subprocess_alive(self.rootcanal_process),
            msg="root-canal stopped immediately after running")
        # Modify the device config to include the correct root-canal port
        for gd_device_config in self.controller_configs.get("GdDevice"):
            gd_device_config["rootcanal_port"] = str(rootcanal_hci_port)
    # Parse and construct GD device objects
    self.register_controller(
        importlib.import_module('cert.gd_device'), builtin=True)
    # Device index 1 is the DUT, index 0 the cert device.
    self.dut = self.gd_devices[1]
    self.cert = self.gd_devices[0]
def process_testclass_results(self):
    """Saves all test results to enable comparison."""
    WifiPingTest.process_testclass_results(self)
    # Compile range results by channel, keyed on position.
    # NOTE(review): a repeated position overwrites the *last* entry, which
    # assumes retests arrive contiguously — confirm with callers.
    range_vs_angle = collections.OrderedDict()
    for test in self.testclass_results:
        curr_params = test['testcase_params']
        curr_config = curr_params['channel']
        if curr_config in range_vs_angle:
            if curr_params['position'] not in range_vs_angle[curr_config][
                    'position']:
                range_vs_angle[curr_config]['position'].append(
                    curr_params['position'])
                range_vs_angle[curr_config]['range'].append(test['range'])
                range_vs_angle[curr_config]['llstats_at_range'].append(
                    test['llstats_at_range'])
            else:
                range_vs_angle[curr_config]['range'][-1] = test['range']
                range_vs_angle[curr_config]['llstats_at_range'][-1] = test[
                    'llstats_at_range']
        else:
            range_vs_angle[curr_config] = {
                'position': [curr_params['position']],
                'range': [test['range']],
                'llstats_at_range': [test['llstats_at_range']]
            }
    chamber_mode = self.testclass_results[0]['testcase_params'][
        'chamber_mode']
    if chamber_mode == 'orientation':
        x_label = 'Angle (deg)'
    elif chamber_mode == 'stepped stirrers':
        x_label = 'Position Index'
    figure = wputils.BokehFigure(
        title='Range vs. Position',
        x_label=x_label,
        primary_y_label='Range (dB)',
    )
    for channel, channel_data in range_vs_angle.items():
        figure.add_line(x_data=channel_data['position'],
                        y_data=channel_data['range'],
                        hover_text=channel_data['llstats_at_range'],
                        legend='Channel {}'.format(channel))
        average_range = sum(channel_data['range']) / len(
            channel_data['range'])
        self.log.info('Average range for Channel {} is: {}dB'.format(
            channel, average_range))
        metric_name = 'ota_summary_ch{}.avg_range'.format(channel)
        self.testclass_metric_logger.add_metric(metric_name, average_range)
    current_context = context.get_current_context().get_full_output_path()
    plot_file_path = os.path.join(current_context, 'results.html')
    figure.generate_figure(plot_file_path)
    # Save results
    results_file_path = os.path.join(current_context,
                                     'testclass_summary.json')
    with open(results_file_path, 'w') as results_file:
        json.dump(range_vs_angle, results_file, indent=4)
def log_path(self):
    """Full path of the sniffer-capture directory, created on demand."""
    base_dir = context.get_current_context().get_full_output_path()
    capture_dir = os.path.join(base_dir, 'sniffer_captures')
    # Create the directory if it is not already there.
    os.makedirs(capture_dir, exist_ok=True)
    return capture_dir
def log_path(self):
    """Full path of this IPerfServer's output directory, created on demand."""
    base_dir = context.get_current_context().get_full_output_path()
    server_dir = os.path.join(base_dir, 'IPerfServer%s' % self.port)
    # Create the directory if it is not already there.
    os.makedirs(server_dir, exist_ok=True)
    return server_dir
def test_rvr_11n_2g_20mhz_open_rx_ipv6(self):
    """RvR: 802.11n, 2.4 GHz, 20 MHz, open network, RX traffic, IPv6."""
    network_ssid = rand_ascii_str(20)
    setup_ap(access_point=self.access_point,
             profile_name='whirlwind',
             channel=hostapd_constants.AP_DEFAULT_CHANNEL_2G,
             ssid=network_ssid,
             setup_bridge=True)
    graph_data = self.run_rvr(network_ssid,
                              band='2g',
                              traffic_dir='rx',
                              ip_version=6)
    for rvr_graph in create_rvr_graph(
            self.test_name,
            context.get_current_context().get_full_output_path(),
            graph_data):
        self.rvr_graph_summary.append(rvr_graph)
    write_csv_rvr_data(
        self.test_name,
        context.get_current_context().get_full_output_path(), graph_data)
def test_update_test_class_context_for_test_class_end(self):
    """Class begin followed by class end must restore the RootContext."""
    begin_event = Mock(spec=TestClassBeginEvent)
    begin_event.test_class = Mock()
    end_event = Mock(spec=TestClassEndEvent)
    end_event.test_class = Mock()

    _update_test_class_context(begin_event)
    _update_test_class_context(end_event)

    self.assertIsInstance(get_current_context(), RootContext)
    reset_context()
def __get_current_output_dir(self, depth=ContextLevel.TESTCASE):
    """Returns the output directory for the requested context level.

    The directory is created by the context system if it does not exist.

    Args:
        depth: The desired level of the output directory. For example,
            the TESTCLASS level would yield the directory associated
            with the current test class context, even if the test is
            currently within a test case.
    """
    return context.get_current_context(depth).get_full_output_path(
        self.logger.name)
def test_rvr_11ac_5g_80mhz_wpa2_tx_ipv6(self):
    """RvR: 802.11ac, 5 GHz, 80 MHz, WPA2, TX traffic, IPv6."""
    network_ssid = rand_ascii_str(20)
    network_password = rand_ascii_str(20)
    security_profile = Security(security_mode='wpa2',
                                password=network_password)
    setup_ap(access_point=self.access_point,
             profile_name='whirlwind',
             channel=hostapd_constants.AP_DEFAULT_CHANNEL_5G,
             ssid=network_ssid,
             security=security_profile,
             setup_bridge=True)
    graph_data = self.run_rvr(network_ssid,
                              password=network_password,
                              band='5g',
                              traffic_dir='tx',
                              ip_version=6)
    for rvr_graph in create_rvr_graph(
            self.test_name,
            context.get_current_context().get_full_output_path(),
            graph_data):
        self.rvr_graph_summary.append(rvr_graph)
    write_csv_rvr_data(
        self.test_name,
        context.get_current_context().get_full_output_path(), graph_data)
def write_csv_time_to_reconnect(self, test_name, time_to_reconnect):
    """Writes the time to reconnect to a csv file.

    Args:
        test_name: the name of the test case
        time_to_reconnect: the time from when the rebooted device came back
            up to when it reassociated (or 'FAIL'), if it failed to
            reconnect.
    """
    log_context = context.get_current_context()
    log_path = os.path.join(log_context.get_base_output_path(),
                            'WlanRebootTest/')
    # Opening in append mode raises FileNotFoundError if the directory has
    # not been created yet, e.g. on the first write of a run.
    os.makedirs(log_path, exist_ok=True)
    csv_file_name = '%stime_to_reconnect.csv' % log_path
    self.log.info('Writing to %s' % csv_file_name)
    with open(csv_file_name, 'a') as csv_file:
        csv_file.write('%s,%s\n' % (test_name, time_to_reconnect))
def process_consistency_results(self, testcase_params, results_dict):
    """Function to process roaming consistency results.

    Compiles the roams recorded in consistency tests, plots the traffic
    traces for each secondary attenuation level, and saves the aggregated
    results — including the compiled roam statistics — to JSON.

    Args:
        testcase_params: dict containing all test results and meta data
        results_dict: dict containing consistency test results
    """
    # make figure placeholder and get relevant functions
    if 'ping' in self.current_test_name:
        detect_gaps = self.detect_ping_gaps
        plot_result = self.plot_ping_result
        primary_y_axis = 'RTT (ms)'
    elif 'iperf' in self.current_test_name:
        detect_gaps = self.detect_iperf_gaps
        plot_result = self.plot_iperf_result
        primary_y_axis = 'Throughput (Mbps)'
    # loop over results
    roam_stats = collections.OrderedDict()
    current_context = context.get_current_context().get_full_output_path()
    for secondary_atten, results_list in results_dict.items():
        figure = wputils.BokehFigure(title=self.current_test_name,
                                     x_label='Time (ms)',
                                     primary_y_label=primary_y_axis,
                                     secondary_y_label='RSSI (dBm)')
        roam_stats[secondary_atten] = collections.OrderedDict()
        for result in results_list:
            self.detect_roam_events(result)
            # Accumulate roam transition counts per attenuation level.
            for roam_transition, count in result['roam_counts'].items():
                roam_stats[secondary_atten][
                    roam_transition] = roam_stats[secondary_atten].get(
                        roam_transition, 0) + count
            detect_gaps(result)
            plot_result(testcase_params, result, figure=figure)
        # save plot
        plot_file_name = (self.current_test_name + '_' +
                          str(secondary_atten) + '.html')
        plot_file_path = os.path.join(current_context, plot_file_name)
        figure.save_figure(plot_file_path)
    results_dict['roam_stats'] = roam_stats
    results_file_path = os.path.join(current_context,
                                     self.current_test_name + '.json')
    # Bug fix: previously only the last inner-loop `result` was dumped,
    # silently discarding the compiled roam statistics. Serialize the full
    # results_dict (which now includes 'roam_stats') instead.
    with open(results_file_path, 'w') as results_file:
        json.dump(wputils.serialize_dict(results_dict),
                  results_file,
                  indent=4)
def test_update_test_case_context_for_test_case_end(self):
    """Case begin + case end must leave the class context on top."""
    class_begin = Mock(spec=TestClassBeginEvent)
    class_begin.test_class = Mock()
    case_begin = Mock(spec=TestCaseBeginEvent)
    case_begin.test_class = Mock()
    case_begin.test_case = Mock()
    case_end = Mock(spec=TestCaseEndEvent)
    case_end.test_class = Mock()
    case_end.test_case = Mock()

    _update_test_class_context(class_begin)
    _update_test_case_context(case_begin)
    _update_test_case_context(case_end)

    self.assertIsInstance(get_current_context(), TestClassContext)
    reset_context()
def log_usage(module_name, func_name):
    """Counts one usage of a function within the current test context.

    The counter key is an instance of UsageMetadataKey, where
    method_name is module_name.func_name and test_context is
    current_context.identifier.

    Args:
        module_name: function module
        func_name: function name
    """
    method_name = '{}.{}'.format(module_name, func_name)
    test_context = context.get_current_context().identifier
    key = UsageMetadataKey(method_name, test_context)
    _usage_map[key] = _usage_map.get(key, 0) + 1
def take_bug_report(self, test_name, begin_time, additional_log_objects=None):
    """Takes a bug report on the device and stores it in a file.

    Args:
        test_name: Name of the test case that triggered this bug report.
        begin_time: Epoch time when the test started.
        additional_log_objects: A list of additional objects in Fuchsia to
            query in the bug report.  Must be in the following format:
            /hub/c/scenic.cmx/[0-9]*/out/objects
    """
    if not additional_log_objects:
        additional_log_objects = []
    log_items = []
    # Bug fix: copy the module-level default list instead of aliasing it —
    # appending to the alias mutated FUCHSIA_DEFAULT_LOG_ITEMS for every
    # subsequent call.
    matching_log_items = list(FUCHSIA_DEFAULT_LOG_ITEMS)
    for additional_log_object in additional_log_objects:
        if additional_log_object not in matching_log_items:
            matching_log_items.append(additional_log_object)
    br_path = context.get_current_context().get_full_output_path()
    os.makedirs(br_path, exist_ok=True)
    time_stamp = acts_logger.normalize_log_line_timestamp(
        acts_logger.epoch_to_log_line_timestamp(begin_time))
    out_name = "FuchsiaDevice%s_%s" % (
        self.serial, time_stamp.replace(" ", "_").replace(":", "-"))
    out_name = "%s.txt" % out_name
    full_out_path = os.path.join(br_path, out_name)
    self.log.info("Taking bugreport for %s on FuchsiaDevice%s." %
                  (test_name, self.serial))
    # Discover all inspect objects on the device, then keep those that
    # match the requested patterns.
    system_objects = self.send_command_ssh('iquery --find /hub').stdout
    system_objects = system_objects.split()
    for matching_log_item in matching_log_items:
        for system_object in system_objects:
            if re.match(matching_log_item, system_object):
                log_items.append(system_object)
    log_command = '%s %s' % (FUCHSIA_DEFAULT_LOG_CMD, ' '.join(log_items))
    bug_report_data = self.send_command_ssh(log_command).stdout
    # Context manager guarantees the file is closed even if write() raises.
    with open(full_out_path, 'w') as bug_report_file:
        bug_report_file.write(bug_report_data)
def take_bt_snoop_log(self, custom_name=None):
    """Takes a the bt-snoop log from the device and stores it in a file
    in a pcap format.

    Args:
        custom_name: Optional file stem to use instead of the default
            FuchsiaDevice<serial>_<timestamp> name.
    """
    bt_snoop_path = context.get_current_context().get_full_output_path()
    time_stamp = acts_logger.normalize_log_line_timestamp(
        acts_logger.epoch_to_log_line_timestamp(time.time()))
    # Bug fix: the original pre-appended '.pcap' and then appended it again
    # in the else branch, producing 'X.pcap.pcap'. Build the name once.
    if custom_name:
        out_name = "%s.pcap" % custom_name
    else:
        out_name = "FuchsiaDevice%s_%s" % (
            self.serial, time_stamp.replace(" ", "_").replace(":", "-"))
        out_name = "%s.pcap" % out_name
    full_out_path = os.path.join(bt_snoop_path, out_name)
    bt_snoop_data = self.send_command_ssh('bt-snoop-cli -d -f pcap').stdout
    # Context manager guarantees the file is closed even if write() raises.
    with open(full_out_path, 'w') as bt_snoop_file:
        bt_snoop_file.write(bt_snoop_data)
def setup_class(self, dut_module, cert_module):
    """Delegates class setup to setup_class_core and validates its result."""
    self.log_path_base = get_current_context().get_full_output_path()
    self.verbose_mode = bool(self.user_params.get('verbose_mode', False))
    # Propagate verbosity into every GD device controller config.
    for config in self.controller_configs[CONTROLLER_CONFIG_NAME]:
        config['verbose_mode'] = self.verbose_mode
    self.info = setup_class_core(
        dut_module=dut_module,
        cert_module=cert_module,
        verbose_mode=self.verbose_mode,
        log_path_base=self.log_path_base,
        controller_configs=self.controller_configs)
    self.dut_module = self.info['dut_module']
    self.cert_module = self.info['cert_module']
    self.rootcanal_running = self.info['rootcanal_running']
    self.rootcanal_logpath = self.info['rootcanal_logpath']
    self.rootcanal_process = self.info['rootcanal_process']
    # Only validate root-canal state when the config asked for it.
    if 'rootcanal' in self.controller_configs:
        asserts.assert_true(
            self.info['rootcanal_exist'],
            "Root canal does not exist at %s" % self.info['rootcanal'])
        asserts.assert_true(self.info['make_rootcanal_ports_available'],
                            "Failed to make root canal ports available")
        self.log.debug("Running %s" % " ".join(self.info['rootcanal_cmd']))
        asserts.assert_true(self.info['is_rootcanal_process_started'],
                            msg="Cannot start root-canal at " +
                            str(self.info['rootcanal']))
        asserts.assert_true(
            self.info['is_subprocess_alive'],
            msg="root-canal stopped immediately after running")
    self.rootcanal_logger = self.info['rootcanal_logger']
    self.controller_configs = self.info['controller_configs']
    # Parse and construct GD device objects
    self.register_controller(importlib.import_module('cert.gd_device'),
                             builtin=True)
    # Device index 1 is the DUT, index 0 the cert device.
    self.dut = self.gd_devices[1]
    self.cert = self.gd_devices[0]
def _get_full_file_path(tag=''):
    """Returns the full file path for the IPerfClient log file.

    Note: If the directory for the file path does not exist, it will be
    created.

    Args:
        tag: The tag passed in to the server run.
    """
    current_context = context.get_current_context()
    full_out_dir = os.path.join(current_context.get_full_output_path(),
                                'iperf_client_files')
    # Serialize directory creation and counter increment so concurrent
    # callers get unique, sequential file names.
    with IPerfClientBase.__log_file_lock:
        os.makedirs(full_out_dir, exist_ok=True)
        tags = ['IPerfClient', tag, IPerfClientBase.__log_file_counter]
        # Drop empty/None tag components from the comma-joined name.
        out_file_name = '%s.log' % (','.join(
            [str(x) for x in tags if x != '' and x is not None]))
        IPerfClientBase.__log_file_counter += 1
    return os.path.join(full_out_dir, out_file_name)
def __init__(self, grpc_port, grpc_root_server_port, signal_port, cmd,
             label, type_identifier, serial_number):
    """Starts a GD backing process, optionally on an Android device over adb."""
    self.label = label if label is not None else grpc_port
    # logging.log_path only exists when this is used in an ACTS test run.
    self.log_path_base = context.get_current_context(
    ).get_full_output_path()
    backing_process_logpath = os.path.join(
        self.log_path_base,
        '%s_%s_backing_logs.txt' % (type_identifier, label))
    self.backing_process_logs = open(backing_process_logpath, 'w')
    cmd_str = json.dumps(cmd)
    # Only add a btsnoop destination if the caller did not set one.
    if "--btsnoop=" not in cmd_str:
        btsnoop_path = os.path.join(self.log_path_base,
                                    '%s_btsnoop_hci.log' % label)
        cmd.append("--btsnoop=" + btsnoop_path)
    self.serial_number = serial_number
    if self.serial_number:
        # Device-based run: sync device clock, forward gRPC ports, push
        # the stack binaries, and quiesce the platform Bluetooth stack.
        self.ad = AdbProxy(serial_number)
        self.ad.shell("date " + time.strftime("%m%d%H%M%Y.%S"))
        self.ad.tcp_forward(int(grpc_port), int(grpc_port))
        self.ad.tcp_forward(int(grpc_root_server_port),
                            int(grpc_root_server_port))
        self.ad.reverse("tcp:%s tcp:%s" % (signal_port, signal_port))
        self.ad.push(
            os.path.join(ANDROID_PRODUCT_OUT,
                         "system/bin/bluetooth_stack_with_facade"),
            "system/bin")
        self.ad.push(
            os.path.join(ANDROID_PRODUCT_OUT,
                         "system/lib64/libbluetooth_gd.so"),
            "system/lib64")
        self.ad.push(
            os.path.join(ANDROID_PRODUCT_OUT,
                         "system/lib64/libgrpc++_unsecure.so"),
            "system/lib64")
        self.ad.shell("logcat -c")
        self.ad.shell("rm /data/misc/bluetooth/logs/btsnoop_hci.log")
        self.ad.shell("svc bluetooth disable")
    # Listen on the signal port before launching the backing process;
    # the process connects back to announce it has started.
    tester_signal_socket = socket.socket(socket.AF_INET,
                                         socket.SOCK_STREAM)
    tester_signal_socket.setsockopt(socket.SOL_SOCKET,
                                    socket.SO_REUSEADDR, 1)
    socket_address = ('localhost', int(signal_port))
    tester_signal_socket.bind(socket_address)
    tester_signal_socket.listen(1)
    self.backing_process = subprocess.Popen(
        cmd,
        cwd=os.getcwd(),
        env=os.environ.copy(),
        stdout=self.backing_process_logs,
        stderr=self.backing_process_logs)
    # Block until the backing process signals start-up, then close.
    tester_signal_socket.accept()
    tester_signal_socket.close()
    self.grpc_root_server_channel = grpc.insecure_channel(
        "localhost:" + grpc_root_server_port)
    self.grpc_port = int(grpc_port)
    self.grpc_channel = grpc.insecure_channel("localhost:" + grpc_port)
def setup_class(self, dut_module, cert_module):
    """Sets up DUT/cert modules and, when configured, a root-canal emulator."""
    self.dut_module = dut_module
    self.cert_module = cert_module
    self.log_path_base = get_current_context().get_full_output_path()
    self.verbose_mode = bool(self.user_params.get('verbose_mode', False))
    # Propagate verbosity into every GD device controller config.
    for config in self.controller_configs[CONTROLLER_CONFIG_NAME]:
        config['verbose_mode'] = self.verbose_mode
    # Start root-canal if needed
    self.rootcanal_running = False
    if 'rootcanal' in self.controller_configs:
        self.rootcanal_running = True
        # Get root canal binary
        rootcanal = os.path.join(get_gd_root(), "root-canal")
        asserts.assert_true(os.path.isfile(rootcanal),
                            "Root canal does not exist at %s" % rootcanal)
        # Get root canal log
        self.rootcanal_logpath = os.path.join(self.log_path_base,
                                              'rootcanal_logs.txt')
        # Make sure ports are available
        rootcanal_config = self.controller_configs['rootcanal']
        rootcanal_test_port = int(rootcanal_config.get(
            "test_port", "6401"))
        rootcanal_hci_port = int(rootcanal_config.get("hci_port", "6402"))
        rootcanal_link_layer_port = int(
            rootcanal_config.get("link_layer_port", "6403"))
        asserts.assert_true(
            make_ports_available((rootcanal_test_port, rootcanal_hci_port,
                                  rootcanal_link_layer_port)),
            "Failed to make root canal ports available")
        # Start root canal process
        rootcanal_cmd = [
            rootcanal,
            str(rootcanal_test_port),
            str(rootcanal_hci_port),
            str(rootcanal_link_layer_port)
        ]
        self.log.debug("Running %s" % " ".join(rootcanal_cmd))
        self.rootcanal_process = subprocess.Popen(
            rootcanal_cmd,
            cwd=get_gd_root(),
            env=os.environ.copy(),
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True)
        asserts.assert_true(self.rootcanal_process,
                            msg="Cannot start root-canal at " +
                            str(rootcanal))
        asserts.assert_true(
            is_subprocess_alive(self.rootcanal_process),
            msg="root-canal stopped immediately after running")
        # Forward root-canal output into the log file asynchronously.
        self.rootcanal_logger = AsyncSubprocessLogger(
            self.rootcanal_process, [self.rootcanal_logpath],
            log_to_stdout=self.verbose_mode,
            tag="rootcanal",
            color=TerminalColor.MAGENTA)
        # Modify the device config to include the correct root-canal port
        for gd_device_config in self.controller_configs.get("GdDevice"):
            gd_device_config["rootcanal_port"] = str(rootcanal_hci_port)
    # Parse and construct GD device objects
    self.register_controller(importlib.import_module('cert.gd_device'),
                             builtin=True)
    # Device index 1 is the DUT, index 0 the cert device.
    self.dut = self.gd_devices[1]
    self.cert = self.gd_devices[0]
def process_testclass_results(self):
    """Saves all test results to enable comparison."""
    testclass_data = collections.OrderedDict()
    # Group throughput by channel, then by test id
    # (mode/traffic_type/traffic_direction/signal_level).
    for test in self.testclass_results:
        current_params = test['testcase_params']
        channel_data = testclass_data.setdefault(current_params['channel'],
                                                 collections.OrderedDict())
        test_id = tuple(
            self.extract_test_id(current_params, [
                'mode', 'traffic_type', 'traffic_direction', 'signal_level'
            ]).items())
        test_data = channel_data.setdefault(
            test_id, collections.OrderedDict(position=[], throughput=[]))
        # Average instantaneous rates past the configured warm-up interval
        # and convert to Mbps (x8 bits, 1.024**2 scaling).
        current_throughput = (numpy.mean(
            test['iperf_result'].instantaneous_rates[
                self.testclass_params['iperf_ignored_interval']:-1])
        ) * 8 * (1.024**2)
        test_data['position'].append(current_params['position'])
        test_data['throughput'].append(current_throughput)
    chamber_mode = self.testclass_results[0]['testcase_params'][
        'chamber_mode']
    if chamber_mode == 'orientation':
        x_label = 'Angle (deg)'
    elif chamber_mode == 'stepped stirrers':
        x_label = 'Position Index'
    # Publish test class metrics
    for channel, channel_data in testclass_data.items():
        for test_id, test_data in channel_data.items():
            test_id_dict = dict(test_id)
            metric_tag = 'ota_summary_{}_{}_{}_ch{}_{}'.format(
                test_id_dict['signal_level'], test_id_dict['traffic_type'],
                test_id_dict['traffic_direction'], channel,
                test_id_dict['mode'])
            metric_name = metric_tag + '.avg_throughput'
            metric_value = numpy.mean(test_data['throughput'])
            self.testclass_metric_logger.add_metric(
                metric_name, metric_value)
            metric_name = metric_tag + '.min_throughput'
            metric_value = min(test_data['throughput'])
            self.testclass_metric_logger.add_metric(
                metric_name, metric_value)
    # Plot test class results
    plots = []
    for channel, channel_data in testclass_data.items():
        current_plot = wputils.BokehFigure(
            title='Channel {} - Rate vs. Position'.format(channel),
            x_label=x_label,
            primary_y_label='Rate (Mbps)',
        )
        for test_id, test_data in channel_data.items():
            test_id_dict = dict(test_id)
            legend = '{}, {} {}, {} RSSI'.format(
                test_id_dict['mode'], test_id_dict['traffic_type'],
                test_id_dict['traffic_direction'],
                test_id_dict['signal_level'])
            current_plot.add_line(test_data['position'],
                                  test_data['throughput'], legend)
        current_plot.generate_figure()
        plots.append(current_plot)
    current_context = context.get_current_context().get_full_output_path()
    plot_file_path = os.path.join(current_context, 'results.html')
    wputils.BokehFigure.save_figures(plots, plot_file_path)
def process_testclass_results(self):
    """Saves and plots test results from all executed test cases."""
    testclass_results_dict = collections.OrderedDict()
    id_fields = ['channel', 'mode', 'rate']
    plots = []
    # Regroup results: one figure per (channel, mode, rate), one line per
    # (chain_mask, num_streams) combination.
    for result in self.testclass_results:
        test_id = self.extract_test_id(result['testcase_params'],
                                       id_fields)
        test_id = tuple(test_id.items())
        chain_mask = result['testcase_params']['chain_mask']
        num_streams = result['testcase_params']['num_streams']
        line_id = (chain_mask, num_streams)
        if test_id not in testclass_results_dict:
            testclass_results_dict[test_id] = collections.OrderedDict()
        if line_id not in testclass_results_dict[test_id]:
            testclass_results_dict[test_id][line_id] = {
                'orientation': [],
                'sensitivity': []
            }
        orientation = result['testcase_params']['orientation']
        # Record sensitivity only when the link held >= 95% of peak
        # throughput; otherwise record NaN so the point is skipped.
        if result['peak_throughput_pct'] >= 95:
            sensitivity = result['sensitivity']
        else:
            sensitivity = float('nan')
        if orientation not in testclass_results_dict[test_id][line_id][
                'orientation']:
            testclass_results_dict[test_id][line_id]['orientation'].append(
                orientation)
            testclass_results_dict[test_id][line_id]['sensitivity'].append(
                sensitivity)
        else:
            # Repeated orientation: overwrite the latest measurement.
            testclass_results_dict[test_id][line_id]['sensitivity'][
                -1] = sensitivity
    for test_id, test_data in testclass_results_dict.items():
        test_id_dict = dict(test_id)
        if 'legacy' in test_id_dict['mode']:
            test_id_str = 'Channel {} - {} {}Mbps'.format(
                test_id_dict['channel'], test_id_dict['mode'],
                test_id_dict['rate'])
        else:
            test_id_str = 'Channel {} - {} MCS{}'.format(
                test_id_dict['channel'], test_id_dict['mode'],
                test_id_dict['rate'])
        curr_plot = wputils.BokehFigure(
            title=str(test_id_str),
            x_label='Orientation (deg)',
            primary_y_label='Sensitivity (dBm)')
        for line_id, line_results in test_data.items():
            curr_plot.add_line(line_results['orientation'],
                               line_results['sensitivity'],
                               legend='Nss{} - Chain Mask {}'.format(
                                   line_id[1], line_id[0]),
                               marker='circle')
            if 'legacy' in test_id_dict['mode']:
                metric_tag = 'ota_summary_ch{}_{}_{}_ch{}'.format(
                    test_id_dict['channel'], test_id_dict['mode'],
                    test_id_dict['rate'], line_id[0])
            else:
                metric_tag = 'ota_summary_ch{}_{}_mcs{}_nss{}_ch{}'.format(
                    test_id_dict['channel'], test_id_dict['mode'],
                    test_id_dict['rate'], line_id[1], line_id[0])
            metric_name = metric_tag + '.avg_sensitivity'
            # nanmean ignores the NaN placeholders inserted above.
            metric_value = numpy.nanmean(line_results['sensitivity'])
            self.testclass_metric_logger.add_metric(
                metric_name, metric_value)
            self.log.info(("Average Sensitivity for {}: {:.1f}").format(
                metric_tag, metric_value))
        current_context = (
            context.get_current_context().get_full_output_path())
        output_file_path = os.path.join(current_context,
                                        str(test_id_str) + '.html')
        curr_plot.generate_figure(output_file_path)
        plots.append(curr_plot)
    output_file_path = os.path.join(current_context, 'results.html')
    wputils.BokehFigure.save_figures(plots, output_file_path)