def handle_device_test_finish(data): """ test finished without reset """ # in this scenario reset should not happen if int(data[1]): # case ignored Utility.console_log("Ignored: " + self.child_case_name, color="orange") one_device_case_finish(not int(data[0]))
def handle_next_stage(data):
    """ reboot finished. we goto next stage

    `data` is unused; progress is signalled through the enclosing scope's
    `stage_finish` list, which the surrounding expect loop polls.
    """
    if last_stage():
        # already last stage, should never goto next stage
        Utility.console_log("didn't finish at last stage", color='orange')
        one_case_finish(False)
    else:
        # tell the outer loop to proceed to the next reset stage
        stage_finish.append('continue')
def log_test_case(description, test_case, ut_config):
    """Print an orange banner naming the case about to run, then its non-empty tags."""
    header = "Running {} '{}' (config {})".format(
        description, test_case['name'], ut_config)
    Utility.console_log(header, color='orange')
    # every key except 'name' with a non-None value counts as a tag
    tags = [
        '%s=%s' % (k, v)
        for (k, v) in test_case.items()
        if k != 'name' and v is not None
    ]
    Utility.console_log('Tags: %s' % ', '.join(tags), color='orange')
def __exit__(self, type, value, traceback):
    """On context exit: save the captured output to a per-test log file, stop any
    attached gdb session, then close the DUT."""
    log_folder = self.app.get_log_folder(TEST_SUITE)
    log_path = os.path.join(log_folder, "log_" + self.test_name + ".txt")
    with open(log_path, "w") as log_file:
        Utility.console_log("Writing output of {} to {}".format(self.test_name, log_file.name))
        log_file.write(self.get_raw_data())
    if self.gdb:
        self.gdb.exit()
    self.close()
def test_wifi_throughput_vs_rssi(env, extra_data):
    """
    steps: |
      1. build with best performance config
      2. switch on one router
      3. set attenuator value from 0-60 for each router
      4. test TCP tx rx and UDP tx rx throughput
    """
    # test-bench handles taken from the environment configuration
    att_port = env.get_variable("attenuator_port")
    ap_list = env.get_variable("ap_list")
    pc_nic_ip = env.get_pc_nic_info("pc_nic", "ipv4")["addr"]
    apc_ip = env.get_variable("apc_ip")
    pc_iperf_log_file = os.path.join(env.log_path, "pc_iperf_log.md")
    # one accumulator per protocol/direction combination
    test_result = {
        "tcp_tx": TestResult("tcp", "tx", BEST_PERFORMANCE_CONFIG),
        "tcp_rx": TestResult("tcp", "rx", BEST_PERFORMANCE_CONFIG),
        "udp_tx": TestResult("udp", "tx", BEST_PERFORMANCE_CONFIG),
        "udp_rx": TestResult("udp", "rx", BEST_PERFORMANCE_CONFIG),
    }
    # 1. get DUT and download
    dut = env.get_dut("iperf", "examples/wifi/iperf", dut_class=ttfw_idf.ESP32DUT,
                      app_config_name=BEST_PERFORMANCE_CONFIG)
    dut.start_app()
    # NOTE(review): a sibling variant of this test waits for either 'iperf>' or
    # 'esp32>' — confirm which console prompt this app config actually prints.
    dut.expect("esp32>")
    # 2. run test for each required att value
    for ap_info in ap_list:
        test_utility = IperfTestUtility(dut, BEST_PERFORMANCE_CONFIG, ap_info["ssid"],
                                        ap_info["password"], pc_nic_ip,
                                        pc_iperf_log_file, test_result)
        # power-cycle the AP under test through the APC power controller
        PowerControl.Control.control_rest(apc_ip, ap_info["outlet"], "OFF")
        PowerControl.Control.control(apc_ip, {ap_info["outlet"]: "ON"})
        Attenuator.set_att(att_port, 0)
        if not test_utility.wait_ap_power_on():
            Utility.console_log(
                "[{}] failed to power on, skip testing this AP".format(
                    ap_info["ssid"]), color="red")
            continue
        # sweep the attenuation range and run the full case set at each step
        for atten_val in ATTEN_VALUE_LIST:
            assert Attenuator.set_att(att_port, atten_val) is True
            test_utility.run_all_cases(atten_val)
    # 3. check test results
    env.close_dut("iperf")
    # 4. generate report
    report = TestReport.ThroughputVsRssiReport(
        os.path.join(env.log_path, "ThroughputVsRssiReport"), test_result)
    report.generate_report()
def test_wifi_throughput_with_different_configs(env, extra_data):
    """
    steps: |
      1. build iperf with specified configs
      2. test throughput for all routers
    """
    pc_nic_ip = env.get_pc_nic_info("pc_nic", "ipv4")["addr"]
    pc_iperf_log_file = os.path.join(env.log_path, "pc_iperf_log.md")
    ap_info = {
        "ssid": env.get_variable("ap_ssid"),
        "password": env.get_variable("ap_password"),
    }
    # discover the sdkconfig.ci.* config names by listing this test's directory.
    # NOTE(review): check_output returns bytes on Python 3, so CONFIG_NAME_PATTERN
    # must be a bytes pattern (or this call needs a .decode()) — confirm the
    # pattern's type where it is defined.
    config_names_raw = subprocess.check_output(
        ["ls", os.path.dirname(os.path.abspath(__file__))])
    config_names = CONFIG_NAME_PATTERN.findall(config_names_raw)
    if not config_names:
        raise ValueError("no configs found in {}".format(os.path.dirname(__file__)))
    test_result = dict()
    sdkconfig_files = dict()
    for config_name in config_names:
        # 1. get the config
        sdkconfig_files[config_name] = os.path.join(
            os.path.dirname(__file__), "sdkconfig.ci.{}".format(config_name))
        # 2. get DUT and download
        dut = env.get_dut("iperf", "examples/wifi/iperf", dut_class=ttfw_idf.ESP32DUT,
                          app_config_name=config_name)
        dut.start_app()
        dut.expect("iperf>")
        # 3. run test for each required att value
        test_result[config_name] = {
            "tcp_tx": TestResult("tcp", "tx", config_name),
            "tcp_rx": TestResult("tcp", "rx", config_name),
            "udp_tx": TestResult("udp", "tx", config_name),
            "udp_rx": TestResult("udp", "rx", config_name),
        }
        test_utility = IperfTestUtility(dut, config_name, ap_info["ssid"],
                                        ap_info["password"], pc_nic_ip,
                                        pc_iperf_log_file, test_result[config_name])
        # repeat the whole suite to smooth out run-to-run variance
        for _ in range(RETRY_COUNT_FOR_BEST_PERFORMANCE):
            test_utility.run_all_cases(0)
        for result_type in test_result[config_name]:
            summary = str(test_result[config_name][result_type])
            if summary:
                Utility.console_log(summary, color="orange")
        # 4. check test results
        env.close_dut("iperf")
    # 5. generate report
    report = TestReport.ThroughputForConfigsReport(
        os.path.join(env.log_path, "ThroughputForConfigsReport"),
        ap_info["ssid"], test_result, sdkconfig_files)
    report.generate_report()
def test_wifi_throughput_vs_rssi(env, extra_data):
    """
    steps: |
      1. build with best performance config
      2. switch on one router
      3. set attenuator value from 0-60 for each router
      4. test TCP tx rx and UDP tx rx throughput
    """
    # test-bench handles taken from the environment configuration
    att_port = env.get_variable('attenuator_port')
    ap_list = env.get_variable('ap_list')
    pc_nic_ip = env.get_pc_nic_info('pc_nic', 'ipv4')['addr']
    apc_ip = env.get_variable('apc_ip')
    pc_iperf_log_file = os.path.join(env.log_path, 'pc_iperf_log.md')
    # one accumulator per protocol/direction combination
    test_result = {
        'tcp_tx': TestResult('tcp', 'tx', BEST_PERFORMANCE_CONFIG),
        'tcp_rx': TestResult('tcp', 'rx', BEST_PERFORMANCE_CONFIG),
        'udp_tx': TestResult('udp', 'tx', BEST_PERFORMANCE_CONFIG),
        'udp_rx': TestResult('udp', 'rx', BEST_PERFORMANCE_CONFIG),
    }
    # 1. get DUT and download
    dut = env.get_dut('iperf', 'examples/wifi/iperf', dut_class=ttfw_idf.ESP32DUT,
                      app_config_name=BEST_PERFORMANCE_CONFIG)
    dut.start_app()
    # accept either console prompt variant
    dut.expect_any('iperf>', 'esp32>')
    # 2. run test for each required att value
    for ap_info in ap_list:
        test_utility = IperfTestUtility(dut, BEST_PERFORMANCE_CONFIG, ap_info['ssid'],
                                        ap_info['password'], pc_nic_ip,
                                        pc_iperf_log_file, test_result)
        # power-cycle the AP under test through the APC power controller
        PowerControl.Control.control_rest(apc_ip, ap_info['outlet'], 'OFF')
        PowerControl.Control.control(apc_ip, {ap_info['outlet']: 'ON'})
        Attenuator.set_att(att_port, 0)
        if not test_utility.wait_ap_power_on():
            Utility.console_log(
                '[{}] failed to power on, skip testing this AP'.format(
                    ap_info['ssid']), color='red')
            continue
        # sweep the attenuation range and run the full case set at each step
        for atten_val in ATTEN_VALUE_LIST:
            assert Attenuator.set_att(att_port, atten_val) is True
            test_utility.run_all_cases(atten_val)
    # 3. check test results
    env.close_dut('iperf')
    # 4. generate report
    report = TestReport.ThroughputVsRssiReport(
        os.path.join(env.log_path, 'STAThroughputVsRssiReport'), test_result)
    report.generate_report()
def close(self):
    """Attempt a graceful gdb quit; on any failure defer to the parent close(),
    which terminates the process."""
    dialog = (
        ('q', 'Quit anyway? (y or n)'),
        ('y', 'Ending remote debugging.'),
    )
    try:
        # drive gdb's interactive quit confirmation
        for command, reply in dialog:
            self.p.sendline(command)
            self.p.expect_exact(reply)
    except Exception:
        Utility.console_log('gdb needs to be killed', 'O')
    super(GDBProcess, self).close()
def handle_test_finish(data): """ test finished without reset """ # in this scenario reset should not happen assert not exception_reset_list if int(data[1]): # case ignored Utility.console_log("Ignored: " + format_case_name(one_case), color="orange") junit_test_case.add_skipped_info("ignored") one_case_finish(not int(data[0]))
def replace_app_bin(dut, name, new_app_bin):
    """Replace the '<name>.bin' entry in dut.download_config with new_app_bin.

    No-op when new_app_bin is None; only the first matching entry is replaced.
    """
    if new_app_bin is None:
        return
    suffix = '/{}.bin'.format(name)
    match_index = next(
        (i for i, entry in enumerate(dut.download_config) if entry.endswith(suffix)),
        None)
    if match_index is not None:
        dut.download_config[match_index] = new_app_bin
        Utility.console_log("The replaced application binary is {}".format(new_app_bin), "O")
def process_coredump_flash(self): """ Extract the core dump from flash, run espcoredump on it """ log_folder = self.app.get_log_folder(TEST_SUITE) coredump_file_name = os.path.join(log_folder, "coredump_data_" + self.test_name + ".bin") Utility.console_log("Writing flash binary core dump to " + coredump_file_name) self.dump_flush(coredump_file_name, partition="coredump") output_file_name = os.path.join(log_folder, "coredump_flash_result_" + self.test_name + ".txt") self._call_espcoredump(["--core-format", "raw"], coredump_file_name, output_file_name)
def process_coredump_flash(self):
    """Pull the core dump out of the flash 'coredump' partition and decode it
    with espcoredump into a per-test result file."""
    log_folder = self.app.get_log_folder(TEST_SUITE)
    dump_path = os.path.join(log_folder, 'coredump_data_' + self.test_name + '.bin')
    result_path = os.path.join(log_folder, 'coredump_flash_result_' + self.test_name + '.txt')
    Utility.console_log('Writing flash binary core dump to ' + dump_path)
    self.dump_flash(dump_path, partition='coredump')
    self._call_espcoredump(['--core-format', 'raw'], dump_path, result_path)
def adder_result(self):
    """Check that every expected response arrived and each equals id*(index+1).

    Returns False on a missing-packet count mismatch or the first wrong value.
    """
    if len(self.response) != self.depth:
        Utility.console_log("Error : missing response packets")
        return False
    for i, got in enumerate(self.response):
        label = "Thread" + str(self.id) + " response[" + str(i) + "]"
        if not test_val(label, str(self.id * (i + 1)), str(got)):
            return False
    return True
def __init__(self, cmd, logfile, verbose=True):
    """Spawn `cmd` under pexpect, teeing its output into `logfile`."""
    self.verbose = verbose
    self.f = open(logfile, 'w')
    if self.verbose:
        message = 'Starting {} > {}'.format(cmd, self.f.name)
        Utility.console_log(message)
    # 60 s default expect timeout; UTF-8 decoding so expect() matches str patterns
    self.pexpect_proc = pexpect.spawn(cmd, timeout=60, logfile=self.f, encoding='utf-8')
def close(self):
    """Politely exit the telnet session; the parent close() hard-kills it if
    the remote side does not hang up."""
    proc = self.pexpect_proc
    try:
        proc.sendline('exit')
        proc.expect_exact('Connection closed by foreign host.')
    except Exception:
        if self.verbose:
            Utility.console_log('telnet needs to be killed', 'O')
    super(TelnetProcess, self).close()
def run_unit_test_cases(env, extra_data):
    """
    extra_data can be three types of value
    1. as string:
        1. "case_name"
        2. "case_name [reset=RESET_REASON]"
    2. as dict:
        1. with key like {"name": "Intr_alloc test, shared ints"}
        2. with key like {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET", "config": "psram"}
    3. as list of string or dict:
        [case1, case2, case3, {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}, ...]

    :param env: test env instance
    :param extra_data: the case name or case list or case dictionary
    :return: None
    """
    case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
    # we don't want stop on failed case (unless some special scenarios we can't handle)
    # this flag is used to log if any of the case failed during executing
    # Before exit test function this flag is used to log if the case fails
    failed_cases = []
    for ut_config in case_config:
        Utility.console_log("Running unit test for config: " + ut_config, "O")
        dut = env.get_dut("unit-test-app", app_path=UT_APP_PATH,
                          app_config_name=ut_config, allow_dut_exception=True)
        # optionally substitute a prebuilt app binary for this config
        if len(case_config[ut_config]) > 0:
            replace_app_bin(dut, "unit-test-app", case_config[ut_config][0].get('app_bin'))
        dut.start_app()
        Utility.console_log("Download finished, start running test cases", "O")
        for one_case in case_config[ut_config]:
            log_test_case("test case", one_case, ut_config)
            performance_items = []
            # create junit report test case
            junit_test_case = TinyFW.JunitReport.create_test_case(
                format_case_name(one_case))
            try:
                run_one_normal_case(dut, one_case, junit_test_case)
                performance_items = dut.get_performance_items()
            except TestCaseFailed:
                failed_cases.append(format_case_name(one_case))
            except Exception as e:
                handle_unexpected_exception(junit_test_case, e)
                failed_cases.append(format_case_name(one_case))
            finally:
                TinyFW.JunitReport.update_performance(performance_items)
                TinyFW.JunitReport.test_case_finish(junit_test_case)
        # close DUT when finish running all cases for one config
        env.close_dut(dut.name)
    # BUGFIX: failed_cases was collected but never acted on, so this test function
    # always passed even with failures. Report and raise, mirroring
    # run_multiple_devices_cases in this file.
    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise TestCaseFailed(*failed_cases)
def code_431_hdr_too_long(dut, port, max_hdr_len):
    """431 check: a header of exactly max_hdr_len must be accepted, one byte
    more must be rejected with 431."""
    Utility.console_log("[test] 431 Header Too Long =>", end=' ')
    ok, status = send_postx_hdr_len(dut, port, max_hdr_len)
    if not ok:
        return False
    ok, status = send_postx_hdr_len(dut, port, max_hdr_len + 1)
    if test_val("Client Error", "431", status):
        Utility.console_log("Success")
        return True
    return False
def code_414_uri_too_long(dut, port, max_uri_len):
    """414 check: a URI of max_uri_len is still routed (404, no such handler);
    one byte more must be rejected with 414."""
    Utility.console_log("[test] 414 URI Too Long =>", end=' ')
    within_limit = send_getx_uri_len(dut, port, max_uri_len)
    if not test_val("Client Error", "404", within_limit):
        return False
    over_limit = send_getx_uri_len(dut, port, max_uri_len + 1)
    if test_val("Client Error", "414", over_limit):
        Utility.console_log("Success")
        return True
    return False
def code_431_hdr_too_long(dut, port, max_hdr_len):
    """Verify 431 behaviour: a header at the limit succeeds; beyond it the
    server answers 431."""
    Utility.console_log('[test] 431 Header Too Long =>', end=' ')
    sent_ok, _ = send_postx_hdr_len(dut, port, max_hdr_len)
    if not sent_ok:
        return False
    _, status = send_postx_hdr_len(dut, port, max_hdr_len + 1)
    if not test_val('Client Error', '431', status):
        return False
    Utility.console_log('Success')
    return True
def code_414_uri_too_long(dut, port, max_uri_len):
    """Verify 414 behaviour: a URI at the limit is routed (404); one char over
    the limit returns 414."""
    Utility.console_log('[test] 414 URI Too Long =>', end=' ')
    # (extra length, expected status) pairs, in order
    for extra, expected in ((0, '404'), (1, '414')):
        status = send_getx_uri_len(dut, port, max_uri_len + extra)
        if not test_val('Client Error', expected, status):
            return False
    Utility.console_log('Success')
    return True
def test_startup(env, extra_data):
    """Boot the startup test app once per sdkconfig.ci.* config found next to
    this file, and wait for app_main to come up."""
    here = os.path.dirname(__file__)
    for config_file in glob.glob(os.path.join(here, 'sdkconfig.ci.*')):
        name = os.path.basename(config_file).replace('sdkconfig.ci.', '')
        Utility.console_log("Checking config \"{}\"... ".format(name), end='')
        dut = env.get_dut('startup', 'tools/test_apps/system/startup', app_config_name=name)
        dut.start_app()
        dut.expect('app_main running')
        env.close_dut(dut.name)
        Utility.console_log('done')
def one_case_finish(result):
    """ one test finished, let expect loop break and log result

    result: True when the case passed. On failure the captured raw output is
    attached to the junit case and TestCaseFailed is raised to abort the loop.
    NOTE(review): `test_finish`, `dut`, `one_case` and `junit_test_case` are
    closure variables from the enclosing scope — confirm there.
    """
    # signal the surrounding expect loop to stop
    test_finish.append(True)
    output = dut.stop_capture_raw_data()
    if result:
        Utility.console_log("Success: " + format_case_name(one_case), color="green")
    else:
        Utility.console_log("Failed: " + format_case_name(one_case), color="red")
        junit_test_case.add_failure_info(output)
        raise TestCaseFailed()
def adder_result(self):
    """Return True iff all expected responses arrived and each equals id*(i+1)."""
    if len(self.response) != self.depth:
        Utility.console_log('Error : missing response packets')
        return False
    # all() short-circuits on the first failing comparison, like the explicit loop
    return all(
        test_val(
            'Thread' + str(self.id) + ' response[' + str(i) + ']',
            str(self.id * (i + 1)),
            str(self.response[i]))
        for i in range(len(self.response)))
def start_test(self, test_name):
    """ Starts the app and sends it the test name

    test_name: name the app will run; stored on self for later log naming.
    """
    self.test_name = test_name
    # Start the app and verify that it has started up correctly
    # (capture begins before boot so no output is lost)
    self.start_capture_raw_data()
    self.start_app()
    # the app prompts for a test name once it is up
    self.expect("Enter test name: ")
    Utility.console_log("Setting boot command: " + test_name)
    self.write(test_name)
    # the echoed name confirms the app received it
    self.expect("Got test name: " + test_name)
def test_example_app_ble_peripheral(env, extra_data):
    """
    Steps:
        1. Discover Bluetooth Adapter and Power On
        2. Connect BLE Device
        3. Read Services
        4. Read Characteristics
        5. Write Characteristics
    """
    # clear stale pairing state and reset the host adapter before the run.
    # NOTE(review): without shell=True the '*' is passed literally to rm and is
    # not glob-expanded — confirm this cleanup actually removes pairing data.
    subprocess.check_output(['rm', '-rf', '/var/lib/bluetooth/*'])
    subprocess.check_output(['hciconfig', 'hci0', 'reset'])
    # Acquire DUT
    dut = env.get_dut("bleprph", "examples/bluetooth/nimble/bleprph",
                      dut_class=ttfw_idf.ESP32DUT)
    # Get binary file
    binary_file = os.path.join(dut.app.binary_path, "bleprph.bin")
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance("bleprph_bin_size", "{}KB".format(bin_size // 1024))
    ttfw_idf.check_performance("bleprph_bin_size", bin_size // 1024, dut.TARGET)
    # Upload binary and start testing
    Utility.console_log("Starting bleprph simple example test app")
    dut.start_app()
    dut.reset()
    # Get device address from dut
    dut_addr = dut.expect(re.compile(r"Device Address: ([a-fA-F0-9:]+)"), timeout=30)[0]
    exceptions_queue = Queue.Queue()
    # Starting a py-client in a separate thread
    bleprph_thread_obj = BlePrphThread(dut, dut_addr, exceptions_queue)
    bleprph_thread_obj.start()
    bleprph_thread_obj.join()
    # drain every exception message the client thread queued; the last one (if
    # any) makes the whole test fail below
    exception_msg = None
    while True:
        try:
            exception_msg = exceptions_queue.get(block=False)
        except Queue.Empty:
            break
        else:
            Utility.console_log("\n" + exception_msg)
    if exception_msg:
        raise Exception("Thread did not run successfully")
    # Check dut responses
    dut.expect("connection established; status=0", timeout=30)
    dut.expect("disconnect;", timeout=30)
def send_err_check(self, request, data=None):
    """Send `request` (and optional `data`) on self.client.

    Returns True on success; on a socket error closes the socket, logs, and
    returns False.
    """
    try:
        self.client.sendall(request.encode())
        if data:
            self.client.sendall(data.encode())
    except socket.error as err:
        self.client.close()
        Utility.console_log('Socket Error in send :', err)
        return False
    return True
def putreq(conn, path, body, verbose=False):
    """Issue a PUT to `path` with `body` over `conn` and return the response payload.

    conn: an http.client.HTTPConnection-like object.
    Returns the raw response body (bytes).
    """
    conn.request("PUT", path, body)
    resp = conn.getresponse()
    data = resp.read()
    if verbose:
        # BUGFIX: `body` was previously passed as console_log's second positional
        # argument (the color slot) — log only the path here.
        Utility.console_log("PUT : " + path)
        # BUGFIX: resp.status is an int and data is bytes; both previously raised
        # TypeError when concatenated to str.
        Utility.console_log("Status : " + str(resp.status))
        Utility.console_log("Reason : " + resp.reason)
        Utility.console_log("Data length : " + str(len(data)))
        Utility.console_log("Data content : " + str(data))
    return data
def detect_exception(self, comp_data):
    """Scan comp_data against every known exception pattern; queue and log each hit."""
    for pattern in self.EXCEPTION_PATTERNS:
        # finditer yields non-overlapping matches left to right, resuming after
        # each match's end — same traversal as a manual search(pos=...) loop
        for match in pattern.finditer(comp_data):
            text = match.group(0)
            self.exceptions.put(text)
            Utility.console_log("[Exception]: {}".format(text), color="red")
def test_loop(env, config_names):  # type: (Any, Any) -> None
    """Run the flash_psram test app once under every config in `config_names`."""
    for config_name in config_names:
        Utility.console_log("Checking config \"{}\"... ".format(config_name), end='')
        dut = env.get_dut('flash_psram', 'tools/test_apps/system/flash_psram',
                          app_config_name=config_name)
        dut.start_app()
        # the app prints this line once all flash/psram checks have passed
        dut.expect('flash psram test success')
        env.close_dut(dut.name)
        Utility.console_log('done')
def run_multiple_devices_cases(env, extra_data):
    """
    extra_data can be two types of value
    1. as dict:
        e.g. {"name": "gpio master/slave test example",
              "child case num": 2, "config": "release", "env_tag": "UT_T2_1"}
    2. as list dict:
        e.g. [{"name": "gpio master/slave test example1", "child case num": 2,
               "config": "release", "env_tag": "UT_T2_1"},
              {"name": "gpio master/slave test example2", "child case num": 2,
               "config": "release", "env_tag": "UT_T2_1"}]

    Runs every configured multi-device case, collects failures, and raises
    TestCaseFailed listing them all at the end.
    """
    failed_cases = []
    case_config = format_test_case_config(extra_data)
    # DUTs are shared across cases of one config; keyed collection filled by
    # run_one_multiple_devices_case
    duts = {}
    for ut_config in case_config:
        Utility.console_log("Running unit test for config: " + ut_config, "O")
        for one_case in case_config[ut_config]:
            log_test_case("multi-device test", one_case, ut_config, )
            result = False
            junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
            try:
                result = run_one_multiple_devices_case(duts, ut_config, env, one_case,
                                                       one_case.get('app_bin'),
                                                       junit_test_case)
            except TestCaseFailed:
                pass  # result is False, this is handled by the finally block
            except Exception as e:
                handle_unexpected_exception(junit_test_case, e)
            finally:
                if result:
                    Utility.console_log("Success: " + format_case_name(one_case),
                                        color="green")
                else:
                    failed_cases.append(format_case_name(one_case))
                    Utility.console_log("Failed: " + format_case_name(one_case),
                                        color="red")
                TinyFW.JunitReport.test_case_finish(junit_test_case)
        # close all DUTs when finish running all cases for one config
        for dut in duts:
            env.close_dut(dut)
        duts = {}
    # report every failure at once so a single bad case doesn't mask the rest
    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise TestCaseFailed(*failed_cases)