def prepare_run(self, testxmlfile, resultdir=None):
    """
    testxmlfile: target test xml file
    execdir and resultdir: should be absolute paths,
    since TRunner is the common lib
    """
    # resultdir defaults to the current directory
    if not resultdir:
        resultdir = os.getcwd()
    ok_prepare = True
    try:
        filename = os.path.splitext(testxmlfile)[0]
        if platform.system() == "Linux":
            filename = filename.split('/')[-2]
        else:
            filename = filename.split('\\')[-2]
        if self.filter_rules["execution_type"] == ["manual"]:
            resultfile = "%s.manual.xml" % filename
        else:
            resultfile = "%s.auto.xml" % filename
        resultfile = JOIN(resultdir, resultfile)
        if not EXISTS(resultdir):
            os.mkdir(resultdir)
        LOGGER.info("[ analyze test xml file: %s ]" % resultfile)
        self.__prepare_result_file(testxmlfile, resultfile)
        self.__split_test_xml(resultfile, resultdir)
    except IOError as error:
        LOGGER.error(error)
        ok_prepare = False
    return ok_prepare
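# A hedged sketch (hypothetical path) of how prepare_run derives the result
# file name: the parent directory of the test xml becomes the base name, and
# the suffix follows the execution-type filter.
#
#   testxmlfile = "/opt/testkit/suite-name/tests.xml"
#   # splitext -> "/opt/testkit/suite-name/tests"; split('/')[-2] -> "suite-name"
#   # filter_rules["execution_type"] == ["manual"] -> "suite-name.manual.xml"
#   # otherwise                                    -> "suite-name.auto.xml"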
def __write_file_result(self, set_result_xml, set_result):
    """write the xml result file"""
    result_file = set_result['resultfile']
    try:
        if self.rerun:
            LOGGER.info("[ Web UI FW unit test does not support rerun,"
                        " result should be N/A ]\n")
        else:
            test_tree = etree.parse(set_result_xml)
            test_em = test_tree.getroot()
            result_tree = etree.parse(result_file)
            result_em = result_tree.getroot()
            debug_file = os.path.basename(set_result_xml)
            debug_file = os.path.splitext(debug_file)[0] + '.dlog'
            # replace each matching <set> in the test xml with the result set
            for result_suite in result_em.getiterator('suite'):
                for result_set in result_suite.getiterator('set'):
                    for test_suite in test_em.getiterator('suite'):
                        for test_set in test_suite.getiterator('set'):
                            if result_set.get('name') == test_set.get('name'):
                                result_set.set("set_debug_msg", debug_file)
                                test_suite.remove(test_set)
                                test_suite.append(result_set)
            test_tree.write(set_result_xml)
            os.remove(result_file)
            LOGGER.info("[ cases result saved to resultfile ]\n")
    except OSError as error:
        traceback.print_exc()
        LOGGER.error(
            "[ Error: fail to write cases result, error: %s ]\n" % error)
def killall(ppid):
    """Kill all child processes spawned by the given parent process ID"""
    sys_platform = platform.system()
    try:
        if sys_platform == "Linux":
            ppid = str(ppid)
            pidgrp = []

            def getchildpids(ppid):
                """Return a list of child process IDs"""
                command = "ps -ef | awk '{if ($3 == %s) print $2;}'" % str(ppid)
                pids = os.popen(command).read()
                return pids.split()

            pidgrp.extend(getchildpids(ppid))
            for pid in pidgrp:
                pidgrp.extend(getchildpids(pid))
            # insert the parent process ID itself into the PID group list
            pidgrp.insert(0, ppid)
            while len(pidgrp) > 0:
                pid = pidgrp.pop()
                try:
                    os.kill(int(pid), signal.SIGKILL)
                except OSError as error:
                    if 'No such process' not in str(error):
                        LOGGER.info(
                            "[ Error: fail to kill pid: %s, error: %s ]\n"
                            % (int(pid), error))
        else:
            # Windows: taskkill /T terminates the whole process tree in one
            # call (assumed branch body; the source truncates here)
            os.system("taskkill /F /T /PID %s" % ppid)
    except OSError as error:
        LOGGER.error(
            "[ Error: fail to kill process %s, error: %s ]\n" % (ppid, error))
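# Usage sketch (hypothetical process): killall() collects child PIDs
# breadth-first via `ps -ef`, so grandchildren appended during iteration are
# reaped as well, then SIGKILLs the whole group including the parent itself.
#
#   import subprocess
#   runner = subprocess.Popen(["testkit-lite", "-f", "tests.xml"])
#   killall(runner.pid)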
def __prepare_starup_parameters(self, testxml):
    """prepare the startup parameters"""
    starup_parameters = {}
    LOGGER.info("[ prepare startup parameters ]")
    try:
        parse_tree = etree.parse(testxml)
        tsuite = parse_tree.getroot().getiterator('suite')[0]
        tset = parse_tree.getroot().getiterator('set')[0]
        # a launcher set on the <set> overrides the <suite> launcher
        if tset.get("launcher") is not None:
            starup_parameters['test-launcher'] = tset.get("launcher")
        else:
            starup_parameters['test-launcher'] = tsuite.get("launcher")
        starup_parameters['testsuite-name'] = tsuite.get("name")
        starup_parameters['testset-name'] = tset.get("name")
        starup_parameters['stub-name'] = self.stub_name
        if self.external_test is not None:
            starup_parameters['external-test'] = self.external_test
        starup_parameters['debug'] = self.debug
        starup_parameters['test_prefix'] = self.test_prefix
        if self.rerun:
            starup_parameters['rerun'] = self.rerun
        if len(self.capabilities) > 0:
            starup_parameters['capability'] = self.capabilities
    except IOError as error:
        LOGGER.error(
            "[ Error: prepare startup parameters, error: %s ]" % error)
    return starup_parameters
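# A hedged sketch (keys as set above, values hypothetical) of the startup
# parameter dict this method returns:
#
#   {
#       'test-launcher': 'WRTLauncher',
#       'testsuite-name': 'tct-websocket-w3c-tests',
#       'testset-name': 'auto',
#       'stub-name': 'testkit-stub',
#       'debug': True,
#       'test_prefix': '',
#   }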
def __run_with_commodule(self, webapi_file):
    """run with the commodule: init the test, poll status, fetch the result"""
    try:
        # prepare the test set list
        test_xml_set_list = self.__split_xml_to_set(webapi_file)
        for test_xml_set in test_xml_set_list:
            LOGGER.info("\n[ run set: %s ]" % test_xml_set)
            # prepare the test JSON
            self.__prepare_external_test_json(test_xml_set)
            # init the test session
            init_status = self.__init_com_module(test_xml_set)
            if not init_status:
                continue
            # send the set JSON data to the commodule
            self.testworker.run_test(self.session_id, self.set_parameters)
            while True:
                time.sleep(1)
                # poll the test status; once the set has finished,
                # fetch the set result and finalize the test
                if self.__check_test_status():
                    set_result = self.testworker.get_test_result(
                        self.session_id)
                    # write the result back into the set xml
                    self.__write_set_result(test_xml_set, set_result)
                    # shut down the server
                    self.finalize_test(self.session_id)
                    break
    except IOError as error:
        LOGGER.error(
            "[ Error: fail to run webapi test xml, error: %s ]" % error)
def __run_webapi_test(self, latest_dir):
    """run the webapi test"""
    if self.bdryrun:
        LOGGER.info("[ WRTLauncher mode does not support dryrun ]")
        return True
    # run auto test sets before manual ones
    list_auto = []
    list_manual = []
    for item in self.exe_sequence:
        if item.endswith("auto"):
            list_auto.append(item)
        elif item.endswith("manual"):
            list_manual.append(item)
    list_auto.sort()
    list_manual.sort()
    self.exe_sequence = list_auto + list_manual
    for webapi_total_file in self.exe_sequence:
        for webapi_file in self.testsuite_dict[webapi_total_file]:
            # print the xml file name only when it changes
            if self.current_test_xml != JOIN(latest_dir, webapi_total_file):
                time.sleep(3)
                LOGGER.info("\n[ testing xml: %s.xml ]\n"
                            % JOIN(latest_dir, webapi_total_file))
                self.current_test_xml = JOIN(latest_dir, webapi_total_file)
            self.__run_with_commodule(webapi_file)
def run_test(self, sessionid, test_set):
    """process the execution of a test set"""
    if sessionid is None:
        return False
    if "cases" not in test_set:
        return False
    # start the debug trace thread
    dlogfile = test_set['current_set_name'].replace('.xml', '.dlog')
    self.opts['dlog_file'] = dlogfile
    self.conn.start_debug(dlogfile)
    time.sleep(1)
    self.result_obj = TestSetResut(
        self.opts['testsuite_name'], self.opts['testset_name'])
    cases = test_set["cases"]
    exetype = test_set["exetype"]
    ctype = test_set["type"]
    if self.opts['test_type'] == "webapi":
        return self.__run_web_test(sessionid, self.opts['testset_name'],
                                   exetype, ctype, cases)
    elif self.opts['test_type'] == "coreapi":
        return self.__run_core_test(sessionid, self.opts['testset_name'],
                                    exetype, cases)
    elif self.opts['test_type'] == "jqunit":
        return self.__run_jqt_test(sessionid, self.opts['testset_name'],
                                   cases)
    else:
        LOGGER.info("[ unsupported test suite type! ]")
        return False
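# A hedged sketch (field names as consumed above, values hypothetical) of the
# test_set dict expected by run_test():
#
#   test_set = {
#       'current_set_name': '/opt/testkit/suite/tests.auto_set_1.xml',
#       'exetype': 'auto',
#       'type': 'wrt',
#       'cases': [{'case_id': 'TC_01', 'entry': '...'}],
#   }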
def __get_test_options(self, deviceid, test_launcher, test_suite):
    """get the test option dict"""
    test_opt = {}
    test_opt["suite_name"] = test_suite
    if test_launcher.find("WRTLauncher") != -1:
        test_opt["launcher"] = "wrt-launcher"
        # the test suite may need to be installed by the commodule
        if self.__test_auto_iu:
            test_wgt = self.__test_wgt
            cmd = WRT_INSTALL_STR % (deviceid, test_suite, test_wgt)
            exit_code, ret = shell_command(cmd)
        else:
            test_wgt = test_suite
        # query whether the test widget is installed
        cmd = WRT_QUERY_STR % (deviceid, test_wgt)
        exit_code, ret = shell_command(cmd)
        if len(ret) == 0:
            LOGGER.info('[ test widget "%s" not installed in target ]'
                        % test_wgt)
            return None
        test_opt["suite_id"] = ret[0].strip("\r\n")
        self.__test_wgt = test_opt["suite_id"]
    else:
        test_opt["launcher"] = test_launcher
    return test_opt
def _upload_file(deviceid, remote_path, local_path):
    """upload a file to the device"""
    cmd = "sdb -s %s push %s %s" % (deviceid, local_path, remote_path)
    exit_code, result = shell_command(cmd)
    if exit_code != 0:
        LOGGER.info('[ Upload file "%s" failed, error: %s ]'
                    % (local_path, result))
        return False
    return True
def _download_file(deviceid, remote_path, local_path):
    """download a file from the device"""
    cmd = "sdb -s %s pull %s %s" % (deviceid, remote_path, local_path)
    exit_code, ret = shell_command(cmd)
    if exit_code != 0:
        error = ret[0].strip("\r\n") if len(ret) else "sdb shell timeout"
        LOGGER.info('[ Download file "%s" from target failed, error: %s ]'
                    % (remote_path, error))
        return False
    return True
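# Usage sketch (device id and paths hypothetical):
#
#   ok = _download_file("emulator-26101",
#                       "/opt/usr/media/Documents/tcresult.xml",
#                       "/tmp/tcresult.xml")
#   # ok is False on a non-zero sdb exit code, with the error logged.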
def download_file(self, remote_path, local_path):
    """download a file from the device"""
    cmd = "sdb -s %s pull %s %s" % (self.deviceid, remote_path, local_path)
    exit_code, ret = shell_command(cmd)
    if exit_code != 0:
        error = ret[0].strip('\r\n') if len(ret) else "sdb shell timeout"
        LOGGER.info('[ Download file "%s" failed, error: %s ]'
                    % (remote_path, error))
        return False
    return True
def upload_file(self, remote_path, local_path):
    """upload a file to the device"""
    cmd = "sdb -s %s push %s %s" % (self.deviceid, local_path, remote_path)
    exit_code, ret = shell_command(cmd)
    if exit_code != 0:
        error = ret[0].strip('\r\n') if len(ret) else "sdb shell timeout"
        LOGGER.info('[ Upload file "%s" failed, error: %s ]'
                    % (local_path, error))
        return False
    return True
def write_json_result(set_result_xml, set_result):
    """fetch the result from JSON and write it into the result xml"""
    case_results = set_result["cases"]
    try:
        parse_tree = etree.parse(set_result_xml)
        root_em = parse_tree.getroot()
        debug_file = os.path.basename(set_result_xml)
        debug_file = os.path.splitext(debug_file)[0] + '.dlog'
        for tset in root_em.getiterator('set'):
            tset.set("set_debug_msg", debug_file)
            for tcase in tset.getiterator('testcase'):
                for case_result in case_results:
                    if tcase.get("id") != case_result['case_id']:
                        continue
                    tcase.set('result', case_result['result'].upper())
                    # merge performance measurement values, if any
                    if tcase.find('measurement') is not None \
                            and 'measures' in case_result:
                        for measurement in tcase.getiterator('measurement'):
                            for m_result in case_result['measures']:
                                if measurement.get('name') == m_result['name'] \
                                        and 'value' in m_result:
                                    measurement.set('value', m_result['value'])
                    # rebuild the <result_info> element from scratch
                    if tcase.find("./result_info") is not None:
                        tcase.remove(tcase.find("./result_info"))
                    result_info = etree.SubElement(tcase, "result_info")
                    actual_result = etree.SubElement(result_info,
                                                     "actual_result")
                    actual_result.text = case_result['result'].upper()
                    start = etree.SubElement(result_info, "start")
                    end = etree.SubElement(result_info, "end")
                    stdout = etree.SubElement(result_info, "stdout")
                    stderr = etree.SubElement(result_info, "stderr")
                    if 'start_at' in case_result:
                        start.text = case_result['start_at']
                    if 'end_at' in case_result:
                        end.text = case_result['end_at']
                    if 'stdout' in case_result:
                        stdout.text = str2xmlstr(case_result['stdout'])
                    if 'stderr' in case_result:
                        stderr.text = str2xmlstr(case_result['stderr'])
        parse_tree.write(set_result_xml)
        LOGGER.info("[ cases result saved to resultfile ]\n")
    except IOError as error:
        traceback.print_exc()
        LOGGER.error(
            "[ Error: fail to write cases result, error: %s ]\n" % error)
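# A hedged sketch (keys as consumed above, values hypothetical) of the
# set_result JSON merged into the result xml by write_json_result():
#
#   set_result = {
#       "cases": [{
#           "case_id": "TC_net_01",
#           "result": "pass",
#           "start_at": "2013-01-01 10:00:00",
#           "end_at": "2013-01-01 10:00:02",
#           "stdout": "...",
#           "stderr": "",
#           "measures": [{"name": "elapsed_time", "value": "1.8"}],
#       }]
#   }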
def extend_result(self, cases_result=None, print_out=True):
    """append case results to the result buffer"""
    self._mutex.acquire()
    if cases_result is not None:
        self._result["cases"].extend(cases_result)
        if print_out:
            for case_it in cases_result:
                LOGGER.info(self._progress % (self._suite_name,
                                              case_it['case_id'],
                                              case_it['result']))
                if case_it['result'].lower() in ('fail', 'block') \
                        and 'stdout' in case_it:
                    LOGGER.info(case_it['stdout'])
    self._mutex.release()
def kill_testkit_lite(pid_file):
    """kill the running testkit-lite process"""
    try:
        with open(pid_file, "r") as pidfile:
            pid = pidfile.readline().rstrip("\n")
        if pid:
            killall(pid)
    except IOError as error:
        if not re.search('No such file or directory|No such process',
                         str(error)):
            LOGGER.info("[ Error: fail to kill existing testkit-lite,"
                        " error: %s ]\n" % error)
def __run_web_test(self, sessionid, test_set_name, exetype, ctype, cases):
    """
    process the execution of a web api test set;
    the set may be split into several blocks, with the unit size
    defined by block_size
    """
    if self.__test_self_exec:
        self.__test_async_shell = QUTestExecThread(
            deviceid=self.__device_id, sessionid=sessionid)
        self.__test_async_shell.start()
        return True
    if self.__test_self_repeat:
        global TEST_SERVER_RESULT, TEST_SERVER_STATUS
        result_file = os.path.expanduser("~") + os.sep \
            + sessionid + "_uifw.xml"
        b_ok = _download_file(self.__device_id, UIFW_RESULT, result_file)
        LOGGER.info("[ web uifw test suite result splitting ... ]")
        if b_ok:
            TEST_SERVER_RESULT = {"resultfile": result_file}
        else:
            TEST_SERVER_RESULT = {"resultfile": ""}
        TEST_SERVER_STATUS = {"finished": 1}
        return True
    # split the case list into blocks of __test_set_block cases
    case_count = len(cases)
    if case_count % self.__test_set_block == 0:
        blknum = case_count / self.__test_set_block
    else:
        blknum = case_count / self.__test_set_block + 1
    idx = 1
    test_set_blocks = []
    while idx <= blknum:
        block_data = {}
        block_data["exetype"] = exetype
        block_data["type"] = ctype
        block_data["totalBlk"] = str(blknum)
        block_data["currentBlk"] = str(idx)
        block_data["casecount"] = str(case_count)
        start = (idx - 1) * self.__test_set_block
        end = case_count if idx == blknum else idx * self.__test_set_block
        block_data["cases"] = cases[start:end]
        test_set_blocks.append(block_data)
        idx += 1
    self.__test_async_http = WebTestExecThread(
        self.__stub_server_url, test_set_name, test_set_blocks)
    self.__test_async_http.start()
    return True
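# Block-splitting sketch: with a hypothetical block size of 300, a set of
# 650 cases yields 650 // 300 + 1 = 3 blocks holding 300, 300 and 50 cases;
# each block carries currentBlk 1..3, totalBlk 3 and casecount 650, and is
# posted to the stub by WebTestExecThread one block at a time.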
def __run_core_auto(self):
    """run core auto cases"""
    self.core_auto_files.sort()
    for core_auto_file in self.core_auto_files:
        # strip the trailing extensions to get the base xml name
        temp_test_xml = os.path.splitext(core_auto_file)[0]
        temp_test_xml = os.path.splitext(temp_test_xml)[0]
        temp_test_xml = os.path.splitext(temp_test_xml)[0]
        temp_test_xml += ".auto"
        # print the xml file name only when it changes
        if self.current_test_xml != temp_test_xml:
            time.sleep(3)
            LOGGER.info("\n[ testing xml: %s.xml ]" % temp_test_xml)
            self.current_test_xml = temp_test_xml
        self.__run_with_commodule(core_auto_file)
def __init_webtest_opt(self, params):
    """init the test runtime; mainly handles the startup of the test stub"""
    if params is None:
        return None
    session_id = str(uuid.uuid1())
    debug_opt = ""
    stub_app = params.get('stub-name', 'testkit-stub')
    stub_port = params.get('stub-port', '8000')
    test_launcher = params.get('external-test', '')
    testsuite_name = params.get('testsuite-name', '')
    testset_name = params.get('testset-name', '')
    capability_opt = params.get("capability", None)
    client_cmds = params.get('test-launcher', '').strip().split()
    wrt_tag = client_cmds[1] if len(client_cmds) > 1 else ""
    self.opts['fuzzy_match'] = fuzzy_match = wrt_tag.find('z') != -1
    self.opts['auto_iu'] = auto_iu = wrt_tag.find('iu') != -1
    self.opts['self_exec'] = wrt_tag.find('a') != -1
    self.opts['self_repeat'] = wrt_tag.find('r') != -1
    self.opts['debug_mode'] = params.get("debug", False)
    test_opt = self.conn.get_launcher_opt(
        test_launcher, testsuite_name, testset_name, fuzzy_match, auto_iu)
    if test_opt is None:
        LOGGER.info("[ init the test options, get failed ]")
        return None
    # to be removed in a later version
    test_opt["suite_id"] = test_opt["test_app_id"]
    self.opts.update(test_opt)
    # the uifw suite runs without the stub
    if self.opts['self_exec'] or self.opts['self_repeat']:
        self.opts['test_type'] = "jqunit"
        return session_id
    # enable debug information
    if self.opts['debug_mode']:
        debug_opt = '--debug'
    if not self.__init_test_stub(stub_app, stub_port, debug_opt):
        LOGGER.info("[ init test failed! ]")
        return None
    ret = http_request(get_url(self.server_url, "/init_test"),
                       "POST", test_opt)
    if ret is None:
        LOGGER.info("[ init test suite failed! ]")
        return None
    if "error_code" in ret:
        LOGGER.info("[ init test suite, get error code %d! ]"
                    % ret["error_code"])
        return None
    if capability_opt is not None:
        ret = http_request(get_url(self.server_url, "/set_capability"),
                           "POST", capability_opt)
    return session_id
def __split_xml_to_set(self, webapi_file):
    """split the xml by <set>"""
    LOGGER.debug("[ split xml: %s by <set> ]" % webapi_file)
    LOGGER.debug("[ this might take some time, please wait ]")
    set_number = 1
    test_xml_set_list = []
    self.resultfiles.discard(webapi_file)
    test_xml_temp = etree.parse(webapi_file)
    # make one copy of the xml per <set>
    for test_xml_temp_suite in test_xml_temp.getiterator('suite'):
        while set_number <= len(test_xml_temp_suite.getiterator('set')):
            copy_url = os.path.splitext(webapi_file)[0]
            copy_url += "_set_%s.xml" % set_number
            copyfile(webapi_file, copy_url)
            test_xml_set_list.append(copy_url)
            self.resultfiles.add(copy_url)
            set_number += 1
    time.sleep(3)
    set_number -= 1
    LOGGER.info("[ total set number is: %s ]" % set_number)
    # keep only one set in each copy and drop copies whose set is empty
    test_xml_set_list_empty = []
    if len(test_xml_set_list) > 1:
        test_xml_set_list.reverse()
        for test_xml_set in test_xml_set_list:
            test_xml_set_tmp = etree.parse(test_xml_set)
            set_keep_number = 1
            for temp_suite in test_xml_set_tmp.getiterator('suite'):
                for test_xml_set_temp_set in temp_suite.getiterator('set'):
                    if set_keep_number != set_number:
                        temp_suite.remove(test_xml_set_temp_set)
                    elif not test_xml_set_temp_set.getiterator('testcase'):
                        test_xml_set_list_empty.append(test_xml_set)
                    set_keep_number += 1
            set_number -= 1
            test_xml_set_tmp.write(test_xml_set)
        for empty_set in test_xml_set_list_empty:
            LOGGER.debug("[ remove empty set: %s ]" % empty_set)
            test_xml_set_list.remove(empty_set)
            self.resultfiles.discard(empty_set)
        test_xml_set_list.reverse()
    return test_xml_set_list
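# Splitting sketch (hypothetical file name): a tests.auto.xml holding three
# <set> elements is copied to tests.auto_set_1.xml .. tests.auto_set_3.xml;
# each copy is then pruned to keep exactly one <set>, and copies whose set
# has no <testcase> left are dropped from the list and the result set again.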
def __run_core_manual(self):
    """run core manual cases"""
    self.core_manual_files.sort()
    for core_manual_file in self.core_manual_files:
        # strip the trailing extensions to get the base xml name
        temp_test_xml = os.path.splitext(core_manual_file)[0]
        temp_test_xml = os.path.splitext(temp_test_xml)[0]
        temp_test_xml = os.path.splitext(temp_test_xml)[0]
        temp_test_xml += ".manual"
        # print the xml file name only when it changes
        if self.current_test_xml != temp_test_xml:
            time.sleep(3)
            LOGGER.info("\n[ testing xml: %s.xml ]" % temp_test_xml)
            self.current_test_xml = temp_test_xml
        if self.non_active:
            self.skip_all_manual = True
        else:
            self.__run_with_commodule(core_manual_file)
def get_launcher_opt(self, test_launcher, test_suite, test_set,
                     fuzzy_match, auto_iu):
    """get the test option dict"""
    test_opt = {}
    test_opt["suite_name"] = test_suite
    test_opt["launcher"] = test_launcher
    test_opt["test_app_id"] = test_launcher
    self._wrt = False
    if test_launcher.find('WRTLauncher') != -1:
        self._wrt = True
        test_app_id = None
        test_opt["launcher"] = "wrt-launcher"
        # the test suite may need to be installed by the commodule
        if auto_iu:
            test_wgt = test_set
            test_wgt_path = "/opt/usr/media/tct/opt/%s/%s.wgt" \
                % (test_suite, test_wgt)
            if not self.install_app(test_wgt_path):
                LOGGER.info('[ failed to install widget "%s" in target ]'
                            % test_wgt)
                return None
        else:
            test_wgt = test_suite
        # query whether the test widget is installed
        cmd = WRT_QUERY_STR % (self.deviceid, test_wgt)
        exit_code, ret = shell_command(cmd)
        if exit_code == -1:
            return None
        for line in ret:
            items = line.split(':')
            if len(items) < 2:
                continue
            if (fuzzy_match and items[0].find(test_wgt) != -1) \
                    or items[0] == test_wgt:
                test_app_id = items[1].strip('\r\n')
                break
        if test_app_id is None:
            LOGGER.info('[ test widget "%s" not found in target ]'
                        % test_wgt)
            return None
        test_opt["test_app_id"] = test_app_id
    return test_opt
def _get_test_options(test_launcher, test_suite):
    """get the test option dict"""
    test_opt = {}
    if test_launcher.find('WRTLauncher') != -1:
        test_opt["launcher"] = "wrt-launcher"
        cmd = "wrt-launcher -l | grep %s | awk '{print $NF}'" % test_suite
        exit_code, ret = shell_command(cmd)
        if len(ret) == 0:
            LOGGER.info('[ test suite "%s" not found in target ]'
                        % test_suite)
            return None
        test_opt["suite_id"] = ret[0].strip('\r\n')
    else:
        test_opt["launcher"] = test_launcher
    test_opt["suite_name"] = test_suite
    return test_opt
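# Usage sketch (suite name hypothetical):
#
#   opts = _get_test_options("WRTLauncher", "tct-ws-tests")
#   # -> {"launcher": "wrt-launcher", "suite_id": "<widget id from target>",
#   #     "suite_name": "tct-ws-tests"}, or None if the widget is missing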
def download_file(self, remote_path, local_path):
    """download a file from the device"""
    local_path_dir = os.path.dirname(local_path)
    if not os.path.exists(local_path_dir):
        os.makedirs(local_path_dir)
    filename = os.path.basename(remote_path)
    cmd = "sdb -s %s pull %s %s" % (
        self.deviceid, remote_path, local_path_dir)
    exit_code, ret = shell_command(cmd)
    if exit_code != 0:
        error = ret[0].strip('\r\n') if len(ret) else "sdb shell timeout"
        LOGGER.info('[ Download file "%s" failed, error: %s ]'
                    % (remote_path, error))
        return False
    # sdb pulls into the directory; rename if a different name was requested
    src_path = os.path.join(local_path_dir, filename)
    if src_path != local_path:
        shutil.move(src_path, local_path)
    return True
def _print_dlog(dlog_file):
    """print the dlog trace captured for a test set"""
    if os.path.exists(dlog_file):
        LOGGER.info('[ start of dlog message ]')
        with open(dlog_file, "r") as readbuff:
            for line in readbuff:
                LOGGER.info(line.strip('\n'))
        LOGGER.info('[ end of dlog message ]')
def _webuifw_test_exec(conn, test_web_app, test_session, test_set_name,
                       exetype, cases_queue, result_obj):
    """run webuifw tests"""
    global UIFW_SET_NUM
    UIFW_SET_NUM += 1
    set_uifw_result = UIFW_RESULT + "_" + str(UIFW_SET_NUM) + ".xml"
    result_obj.set_status(0)
    result_obj.set_result({"resultfile": ""})
    ls_cmd = "ls -l %s" % set_uifw_result
    time_out = UIFW_MAX_TIME
    rm_cmd = "rm /opt/usr/media/Documents/tcresult*.xml"
    if exetype == "auto":
        conn.shell_cmd(rm_cmd)
        UIFW_SET_NUM = 1
    LOGGER.info('[webuifw] start test executing')
    if not conn.launch_app(test_web_app):
        LOGGER.info('[ launch test app "%s" failed! ]' % test_web_app)
        result_obj.set_result({"resultfile": ""})
        result_obj.set_status(1)
        return
    result_file = os.path.expanduser("~") + os.sep \
        + test_session + "_uifw.xml"
    # poll until the result xml shows up on the device or the timeout expires
    while time_out > 0:
        LOGGER.info('[webuifw] waiting for test completed...')
        exit_code, ret = conn.shell_cmd(ls_cmd)
        if len(ret) and 'No such file or directory' not in ret[0]:
            break
        time.sleep(2)
        time_out -= 2
    LOGGER.info('[webuifw] end test executing')
    if conn.download_file(set_uifw_result, result_file):
        result_obj.set_result({"resultfile": result_file})
        for test_case in cases_queue:
            LOGGER.info("[webuifw] execute case: %s # %s"
                        % (test_set_name, test_case['case_id']))
    result_obj.set_status(1)
def __init_test_stub(self, stub_app, stub_port, debug_opt):
    """init the testkit-stub daemon process"""
    timecnt = 0
    blaunched = False
    while timecnt < CNT_RETRY:
        if not self.conn.check_process(stub_app):
            LOGGER.info("[ no stub process activated, now try to launch %s ]"
                        % stub_app)
            self.conn.launch_stub(stub_app, stub_port, debug_opt)
            timecnt += 1
        else:
            blaunched = True
            break
    if not blaunched:
        LOGGER.info("[ launch stub process failed! ]")
        return False
    if self.server_url is None:
        self.server_url = self.conn.get_server_url(stub_port)
    # wait until the stub http server answers the status query
    timecnt = 0
    blaunched = False
    while timecnt < CNT_RETRY:
        ret = http_request(get_url(self.server_url, "/check_server_status"),
                           "GET", {})
        if ret is None:
            LOGGER.info("[ check server status, not ready yet! ]")
            timecnt += 1
            time.sleep(1)
        else:
            blaunched = True
            break
    return blaunched
def run(self):
    """run Qunit tests"""
    global TEST_SERVER_RESULT, TEST_SERVER_STATUS
    LOCK_OBJ.acquire()
    TEST_SERVER_RESULT = {"resultfile": ""}
    TEST_SERVER_STATUS = {"finished": 0}
    LOCK_OBJ.release()
    ls_cmd = "sdb -s %s shell ls -l %s" % (self.device_id, UIFW_RESULT)
    prev_stamp = ""
    time_stamp = ""
    LOGGER.info("[ web uifw test suite start ... ]")
    time_out = 600
    status_cnt = 0
    # watch the result file's timestamp; the first change means the result
    # xml is being generated, the second means generation has finished
    while time_out > 0:
        time.sleep(2)
        time_out -= 2
        exit_code, ret = shell_command(ls_cmd)
        time_stamp = ret[0] if len(ret) > 0 else ""
        if time_stamp == prev_stamp:
            continue
        prev_stamp = time_stamp
        status_cnt += 1
        if status_cnt == 1:
            LOGGER.info("[ web uifw begin generating result xml ... ]")
        elif status_cnt >= 2:
            LOGGER.info("[ web uifw end generating result xml ... ]")
            result_file = os.path.expanduser("~") + os.sep \
                + self.test_session + "_uifw.xml"
            if _download_file(self.device_id, UIFW_RESULT, result_file):
                LOCK_OBJ.acquire()
                TEST_SERVER_RESULT = {"resultfile": result_file}
                LOCK_OBJ.release()
                break
    LOGGER.info("[ web uifw test suite completed ... ]")
    LOCK_OBJ.acquire()
    TEST_SERVER_STATUS = {"finished": 1}
    LOCK_OBJ.release()
def __check_test_status(self):
    """
    poll the test status from the commodule:
    return True if the set has finished, False if it is still running
    """
    session_status = self.testworker.get_test_status(self.session_id)
    # session_status["finished"] == "0": still running
    # session_status["finished"] == "1": finished
    if session_status is not None:
        if session_status["finished"] == "0":
            for line in session_status["msg"]:
                LOGGER.info(line)
            return False
        if session_status["finished"] == "1":
            return True
        LOGGER.error("[ session status error, please finalize the test ]\n")
    # return True to finish this set, because of the server error
    return True
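# A hedged sketch (shape follows the checks above, values hypothetical) of
# the status dict returned by get_test_status():
#
#   {"finished": "0", "msg": ["[case] TC_01 start", "[case] TC_01 PASS"]}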
def __init_webtest_opt(self, params):
    """init the test runtime; mainly handles the startup of the test stub"""
    if params is None:
        return None
    session_id = str(uuid.uuid1())
    debug_opt = ""
    capability_opt = None
    stub_app = params["stub-name"]
    stub_port = "8000"
    test_launcher = params["external-test"]
    testsuite_name = params["testsuite-name"]
    if "debug" in params and params["debug"]:
        debug_opt = "--debug"
    if "capability" in params:
        capability_opt = params["capability"]
    test_opt = _get_test_options(test_launcher, testsuite_name)
    if test_opt is None:
        return None
    LOGGER.info("[ launch the stub httpserver ]")
    cmdline = "killall %s" % stub_app
    exit_code, ret = shell_command(cmdline)
    time.sleep(2)
    cmdline = "%s --port:%s %s" % (stub_app, stub_port, debug_opt)
    self.__test_async_shell = StubExecThread(
        cmd=cmdline, sessionid=session_id)
    self.__test_async_shell.start()
    time.sleep(2)
    self.__server_url = "http://%s:%s" % (HOST_NS, stub_port)
    # wait until the stub http server answers the status query
    timecnt = 0
    blaunched = False
    while timecnt < 10:
        time.sleep(1)
        ret = http_request(get_url(self.__server_url,
                                   "/check_server_status"), "GET", {})
        if ret is None:
            LOGGER.info("[ check server status, not ready yet! ]")
            timecnt += 1
            continue
        if "error_code" in ret:
            LOGGER.info("[ check server status, get error code %d! ]"
                        % ret["error_code"])
            return None
        LOGGER.info("[ check server status, get ready! ]")
        blaunched = True
        break
    if not blaunched:
        LOGGER.info("[ connect to server timeout! ]")
        return None
    ret = http_request(get_url(self.__server_url, "/init_test"),
                       "POST", test_opt)
    if ret is None:
        LOGGER.info("[ init test suite failed! ]")
        return None
    if "error_code" in ret:
        LOGGER.info("[ init test suite, get error code %d! ]"
                    % ret["error_code"])
        return None
    if capability_opt is not None:
        ret = http_request(get_url(self.__server_url, "/set_capability"),
                           "POST", capability_opt)
    return session_id
def run(self):
    """run core tests"""
    if self.cases_queue is None:
        return
    total_count = len(self.cases_queue)
    current_idx = 0
    manual_skip_all = False
    global TEST_SERVER_STATUS, TEST_SERVER_RESULT
    LOCK_OBJ.acquire()
    TEST_SERVER_RESULT = {"cases": []}
    TEST_SERVER_STATUS = {"finished": 0}
    LOCK_OBJ.release()
    for test_case in self.cases_queue:
        current_idx += 1
        expected_result = "0"
        core_cmd = ""
        time_out = None
        measures = []
        retmeasures = []
        if "entry" in test_case:
            core_cmd = test_case["entry"]
        else:
            LOGGER.info("[ Warning: test script is empty,"
                        " please check your test xml file ]")
            continue
        if "expected_result" in test_case:
            expected_result = test_case["expected_result"]
        if "timeout" in test_case:
            time_out = int(test_case["timeout"])
        if "measures" in test_case:
            measures = test_case["measures"]
        LOGGER.info("\n[case] execute case:\nTestCase: %s\nTestEntry: %s\n"
                    "Expected Result: %s\nTotal: %s, Current: %s"
                    % (test_case['case_id'], test_case['entry'],
                       expected_result, total_count, current_idx))
        LOGGER.info("[ execute test script,"
                    " this might take some time, please wait ]")
        strtime = datetime.now().strftime(DATE_FORMAT_STR)
        LOGGER.info("start time: %s" % strtime)
        test_case["start_at"] = strtime
        if self.exetype == 'auto':
            return_code, stdout, stderr = shell_command_ext(
                cmd=core_cmd, timeout=time_out, boutput=False)
            if return_code is not None:
                actual_result = str(return_code)
                if actual_result == "timeout":
                    test_case["result"] = "BLOCK"
                    test_case["stdout"] = "none"
                    test_case["stderr"] = "none"
                else:
                    if actual_result == expected_result:
                        test_case["result"] = "pass"
                    else:
                        test_case["result"] = "fail"
                    test_case["stdout"] = stdout
                    test_case["stderr"] = stderr
                    # collect measured values written to ini-style files
                    for item in measures:
                        ind = item['name']
                        fname = item['file']
                        if fname and os.path.exists(fname):
                            try:
                                config = ConfigParser.ConfigParser()
                                config.read(fname)
                                item['value'] = config.get(ind, 'value')
                                retmeasures.append(item)
                            except IOError as error:
                                LOGGER.error(
                                    "[ Error: failed to parse value,"
                                    " error: %s ]\n" % error)
                    test_case["measures"] = retmeasures
            else:
                test_case["result"] = "BLOCK"
                test_case["stdout"] = "none"
                test_case["stderr"] = "none"
        elif self.exetype == 'manual':
            # handle manual core cases
            try:
                # print pre-condition info
                if "pre_condition" in test_case:
                    LOGGER.info("\n****\nPre-condition: %s\n ****\n"
                                % test_case['pre_condition'])
                # print step info
                if "steps" in test_case:
                    for step in test_case['steps']:
                        LOGGER.info("********************\nStep Order: %s"
                                    % step['order'])
                        LOGGER.info("Step Desc: %s" % step['step_desc'])
                        LOGGER.info("Expected: %s\n********************\n"
                                    % step['expected'])
                if manual_skip_all:
                    test_case["result"] = "N/A"
                else:
                    while True:
                        test_result = raw_input(
                            '[ please input case result ]'
                            ' (p^PASS, f^FAIL, b^BLOCK, n^Next, d^Done): ')
                        if test_result.lower() == 'p':
                            test_case["result"] = "PASS"
                            break
                        elif test_result.lower() == 'f':
                            test_case["result"] = "FAIL"
                            break
                        elif test_result.lower() == 'b':
                            test_case["result"] = "BLOCK"
                            break
                        elif test_result.lower() == 'n':
                            test_case["result"] = "N/A"
                            break
                        elif test_result.lower() == 'd':
                            manual_skip_all = True
                            test_case["result"] = "N/A"
                            break
                        else:
                            LOGGER.info("[ Warning: your input '%s' is"
                                        " invalid, please try again ]"
                                        % test_result)
            except IOError as error:
                LOGGER.error("[ Error: fail to get core manual test step,"
                             " error: %s ]\n" % error)
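# Measurement sketch (hypothetical file content): an auto case declaring
# measures = [{'name': 'startup_time', 'file': '/tmp/measure.ini'}] is
# expected to leave behind an ini file that ConfigParser can read, one
# section per measure name with a 'value' option:
#
#   [startup_time]
#   value = 1.23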