Example #1
 def finalize_test(self, sessionid):
     '''shut down testkit-stub'''
     try:
         self.testworker.finalize_test(sessionid)
     except Exception as error:
         LOGGER.error("[ Error: fail to close webapi http server, "
                      "error: %s ]" % error)
Example #2
    def __run_with_commodule(self, webapi_file):
        """run_with_commodule,Initialization,check status,get result"""
        try:
            # prepare test set list
            test_xml_set_list = self.__split_xml_to_set(webapi_file)
            # create temporary parameter
            for test_xml_set in test_xml_set_list:
                LOGGER.info("\n[ run set: %s ]" % test_xml_set)
                # prepare the test JSON
                self.__prepare_external_test_json(test_xml_set)

                # init test here
                init_status = self.__init_com_module(test_xml_set)
                if not init_status:
                    continue
                # send set JSON Data to com_module
                self.testworker.run_test(
                    self.session_id, self.set_parameters)
                while True:
                    time.sleep(1)
                    # check the test status; if the set is finished, get
                    # the set result and finalize the test
                    if self.__check_test_status():
                        set_result = self.testworker.get_test_result(
                            self.session_id)
                        # write_result to set_xml
                        self.__write_set_result(
                            test_xml_set, set_result)
                        # shut down server
                        self.finalize_test(self.session_id)
                        break
        except IOError as error:
            LOGGER.error(
                "[ Error: fail to run webapi test xml, error: %s ]" % error)
Example #3
 def __write_file_result(self, set_result_xml, set_result):
     """write xml result file"""
     result_file = set_result['resultfile']
     try:
         if self.rerun:
             LOGGER.info("[ Web UI FW Unit Test Does not support rerun.\
                   Result should be N/A ]\n")
         else:
             test_tree = etree.parse(set_result_xml)
             test_em = test_tree.getroot()
             result_tree = etree.parse(result_file)
             result_em = result_tree.getroot()
             debug_file = os.path.basename(set_result_xml)
             debug_file = os.path.splitext(debug_file)[0] + '.dlog'
             for result_suite in result_em.getiterator('suite'):
                 for result_set in result_suite.getiterator('set'):
                     for test_suite in test_em.getiterator('suite'):
                         for test_set in test_suite.getiterator('set'):
                             if result_set.get('name') == \
                                     test_set.get('name'):
                                 result_set.set("set_debug_msg", dubug_file)
                                 test_suite.remove(test_set)
                                 test_suite.append(result_set)
             test_tree.write(set_result_xml)
             os.remove(result_file)
         LOGGER.info("[ cases result saved to resultfile ]\n")
     except OSError as error:
         traceback.print_exc()
         LOGGER.error(
             "[ Error: fail to write cases result, error: %s ]\n" % error)
Example #4
 def __prepare_result_file(self, testxmlfile, resultfile):
     """ write the test_xml content to resultfile"""
     try:
         parse_tree = etree.parse(testxmlfile)
         suiteparent = parse_tree.getroot()
         if not parse_tree.getiterator('test_definition'):
             suiteparent = etree.Element('test_definition')
             suiteparent.tail = "\n"
             for suite in parse_tree.getiterator('suite'):
                 suite.tail = "\n"
                 suiteparent.append(suite)
         self.apply_filter(suiteparent)
         try:
             with open(resultfile, 'w') as output:
                 tree = etree.ElementTree(element=suiteparent)
                 tree.write(output)
         except IOError as error:
             LOGGER.error("[ Error: create filtered result file: %s failed,\
                 error: %s ]" % (resultfile, error))
     except IOError as error:
         LOGGER.error(error)
         return False
Example #5
 def prepare_run(self, testxmlfile, resultdir=None):
     """
     testxmlfile: target testxml file
     execdir and resultdir: should be the absolute path since TRunner
     is the common lib
     """
     # resultdir is set to current directory by default
     if not resultdir:
         resultdir = os.getcwd()
     ok_prepare = True
     try:
         filename = os.path.splitext(testxmlfile)[0]
         if platform.system() == "Linux":
             filename = filename.split('/')[-2]
         else:
             filename = filename.split('\\')[-2]
         if self.filter_rules["execution_type"] == ["manual"]:
             resultfile = "%s.manual.xml" % filename
         else:
             resultfile = "%s.auto.xml" % filename
         resultfile = JOIN(resultdir, resultfile)
         if not EXISTS(resultdir):
             os.mkdir(resultdir)
         LOGGER.info("[ analyzing test xml file: %s ]" % resultfile)
         self.__prepare_result_file(testxmlfile, resultfile)
         self.__split_test_xml(resultfile, resultdir)
     except IOError as error:
         LOGGER.error(error)
         ok_prepare = False
     return ok_prepare
Example #6
    def __prepare_startup_parameters(self, testxml):
        """prepare the startup parameters for com_module initialization"""

        startup_parameters = {}
        LOGGER.info("[ prepare startup parameters ]")
        try:
            parse_tree = etree.parse(testxml)
            tsuite = parse_tree.getroot().getiterator('suite')[0]
            tset = parse_tree.getroot().getiterator('set')[0]
            if tset.get("launcher") is not None:
                startup_parameters['test-launcher'] = tset.get("launcher")
            else:
                startup_parameters['test-launcher'] = tsuite.get("launcher")
            startup_parameters['testsuite-name'] = tsuite.get("name")
            startup_parameters['testset-name'] = tset.get("name")
            startup_parameters['stub-name'] = self.stub_name
            if self.external_test is not None:
                startup_parameters['external-test'] = self.external_test
            startup_parameters['debug'] = self.debug
            startup_parameters['test_prefix'] = self.test_prefix
            if self.rerun:
                startup_parameters['rerun'] = self.rerun
            if self.capabilities:
                startup_parameters['capability'] = self.capabilities
        except IOError as error:
            LOGGER.error(
                "[ Error: prepare startup parameters, error: %s ]" % error)
        return startup_parameters
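For a set that overrides the suite-level launcher, the returned dictionary ends up shaped like this (all values illustrative):

startup_parameters = {
    'test-launcher': 'WRTLauncher',  # the set-level launcher wins
    'testsuite-name': 'sample-suite',
    'testset-name': 'auto',
    'stub-name': 'testkit-stub',
    'debug': False,
    'test_prefix': '',
}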
Example #7
def write_json_result(set_result_xml, set_result):
    '''fetch results from JSON'''

    case_results = set_result["cases"]
    try:
        parse_tree = etree.parse(set_result_xml)
        root_em = parse_tree.getroot()
        debug_file = os.path.basename(set_result_xml)
        debug_file = os.path.splitext(debug_file)[0] + '.dlog'
        for tset in root_em.getiterator('set'):
            tset.set("set_debug_msg", dubug_file)
            for tcase in tset.getiterator('testcase'):
                for case_result in case_results:
                    if tcase.get("id") == case_result['case_id']:
                        tcase.set('result', case_result['result'].upper())
                        # Check performance test
                        if tcase.find('measurement') is not None:
                            for measurement in tcase.getiterator(
                                    'measurement'):
                                if 'measures' in case_result:
                                    m_results = case_result['measures']
                                    for m_result in m_results:
                                        if (measurement.get('name') ==
                                                m_result['name'] and
                                                'value' in m_result):
                                            measurement.set(
                                                'value', m_result['value'])
                        if tcase.find("./result_info") is not None:
                            tcase.remove(tcase.find("./result_info"))
                        result_info = etree.SubElement(tcase, "result_info")
                        actual_result = etree.SubElement(
                            result_info, "actual_result")
                        actual_result.text = case_result['result'].upper()

                        start = etree.SubElement(result_info, "start")
                        end = etree.SubElement(result_info, "end")
                        stdout = etree.SubElement(result_info, "stdout")
                        stderr = etree.SubElement(result_info, "stderr")
                        if 'start_at' in case_result:
                            start.text = case_result['start_at']
                        if 'end_at' in case_result:
                            end.text = case_result['end_at']
                        if 'stdout' in case_result:
                            stdout.text = str2xmlstr(case_result['stdout'])
                        if 'stderr' in case_result:
                            stderr.text = str2xmlstr(case_result['stderr'])
        parse_tree.write(set_result_xml)

        LOGGER.info("[ cases result saved to resultfile ]\n")
    except IOError as error:
        traceback.print_exc()
        LOGGER.error(
            "[ Error: fail to write cases result, error: %s ]\n" % error)
Example #8
 def get_test_result(self, sessionid):
     """get the test result for a test set """
     result = {}
     if sessionid is None:
         return result
     try:
         global TEST_SERVER_RESULT
         LOCK_OBJ.acquire()
         result = TEST_SERVER_RESULT
         LOCK_OBJ.release()
     except OSError as error:
         LOGGER.error("[ Error: failed to get test result, error: %s ]\n"
                      % error)
     return result
Example #9
 def __init_com_module(self, testxml):
     """
         send init test to com_module
         if webapi test,com_module will start testkit-stub
         else com_module send the test case to devices
     """
     startup_prms = self.__prepare_startup_parameters(testxml)
     # init the stub and get the session_id
     session_id = self.testworker.init_test(startup_prms)
     if session_id is None:
         LOGGER.error("[ Error: Initialization Error ]")
         return False
     self.set_session_id(session_id)
     return True
Example #10
def replace_cdata(file_name):
    """ replace some character"""
    try:
        abs_path = mktemp()
        # rewrite line by line, turning escaped CDATA markers back
        with open(abs_path, 'w') as new_file, open(file_name) as old_file:
            for line in old_file:
                line_temp = line.replace('&lt;![CDATA', '<![CDATA')
                new_file.write(line_temp.replace(']]&gt;', ']]>'))
        remove(file_name)
        move(abs_path, file_name)
    except IOError as error:
        LOGGER.error("[ Error: fail to replace cdata in the result file, "
                     "error: %s ]\n" % error)
Example #11
def get_version_info():
    """
        get the testkit tool version by reading the VERSION file;
        on Linux the VERSION file must live in /opt/testkit/lite/
    """
    try:
        config = ConfigParser.ConfigParser()
        if platform.system() == "Linux":
            config.read('/opt/testkit/lite/VERSION')
        else:
            version_file = os.path.join(sys.path[0], 'VERSION')
            config.read(version_file)
        version = config.get('public_version', 'version')
        return version
    except ConfigParser.Error as error:
        LOGGER.error(
            "[ Error: fail to parse version info, error: %s ]\n" % error)
        return ""
Example #12
    def get_capability(self, file_name):
        """get_capability from file """

        capability_xml = file_name
        capabilities = {}
        try:
            parse_tree = etree.parse(capability_xml)
            root_em = parse_tree.getroot()
            for tcap in root_em.getiterator('capability'):
                capability = get_capability_form_node(tcap)
                capabilities = dict(capabilities, **capability)

            self.set_capability(capabilities)
            return True
        except IOError as error:
            LOGGER.error(
                "[ Error: fail to parse capability xml, error: %s ]" % error)
            return False
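get_capability_form_node is defined elsewhere; a minimal sketch consistent with this call site maps one <capability> element to a single-entry dict (hypothetical; the real node layout may differ):

def get_capability_form_node(node):
    """Map one <capability> element to {name: value}."""
    name = node.get('name')
    # a capability may carry its value as an attribute or as text
    value = node.get('support') or (node.text or '').strip()
    return {name: value}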
Example #13
    def __splite_external_test(self, resultfile, test_file_name, resultdir):
        """select external_test"""
        testsuite_dict_value_list = []
        testsuite_dict_add_flag = 0
        filename_diff = 1

        parser = etree.parse(resultfile)
        for tsuite in parser.getiterator('suite'):
            root = etree.Element('test_definition')
            suitefilename = os.path.splitext(resultfile)[0]
            suitefilename += ".suite_%s.xml" % filename_diff
            suitefilename = JOIN(resultdir, suitefilename)
            tsuite.tail = "\n"
            root.append(tsuite)
            try:
                with open(suitefilename, 'w') as output:
                    tree = etree.ElementTree(element=root)
                    tree.write(output)
            except IOError as error:
                LOGGER.error("[ Error: create filtered result file: %s failed,\
                 error: %s ]" % (suitefilename, error))
            case_suite_find = etree.parse(
                suitefilename).getiterator('testcase')
            if case_suite_find:
                if tsuite.get('launcher'):
                    if not tsuite.get('launcher').startswith('WRTLauncher'):
                        self.__splite_core_test(suitefilename)
                    else:
                        testsuite_dict_value_list.append(suitefilename)
                        if testsuite_dict_add_flag == 0:
                            self.exe_sequence.append(test_file_name)
                        testsuite_dict_add_flag = 1
                        self.resultfiles.add(suitefilename)
                else:
                    if self.filter_rules["execution_type"] == ["auto"]:
                        self.core_auto_files.append(suitefilename)
                    else:
                        self.core_manual_files.append(suitefilename)
                    self.resultfiles.add(suitefilename)
            filename_diff += 1
        if testsuite_dict_add_flag:
            self.testsuite_dict[test_file_name] = testsuite_dict_value_list
Example #14
 def __merge_result_by_name(
         self, result_set, total_set, result_suite, total_suite):
     ''' merge result select by name'''
     if result_set.get('name') == total_set.get('name') \
             and result_suite.get('name') == total_suite.get('name'):
         if result_set.get('set_debug_msg'):
             total_set.set("set_debug_msg", result_set.get('set_debug_msg'))
         # cases that have no result in the result set stay N/A;
         # append cases from the result set to the total set
         result_case_iterator = result_set.getiterator(
             'testcase')
         if result_case_iterator:
             # LOGGER.info("----[ suite: %s, set: %s, time: %s ]"
             #% (result_suite.get('name'), result_set.get('name'),
             #    datetime.today().strftime("%Y-%m-%d_%H_%M_%S")))
             for result_case in result_case_iterator:
                 try:
                     self.__count_result(result_case)
                     total_set.append(result_case)
                 except IOError as error:
                     LOGGER.error("[ Error: fail to append %s, error: %s ]"
                                  % (result_case.get('id'), error))
Example #15
    def __check_test_status(self):
        '''
            Get the test status from com_module and check it:
            return True if the set has finished, else False.
        '''

        session_status = self.testworker.get_test_status(self.session_id)
        # session_status["finished"] == "0" is running
        # session_status["finished"] == "1" is end
        if session_status is not None:
            if session_status["finished"] == "0":
                progress_msg_list = session_status["msg"]
                for line in progress_msg_list:
                    LOGGER.info(line)
                return False
            elif session_status["finished"] == "1":
                return True
        else:
            LOGGER.error("[ session status error, please finalize test ]\n")
            # return True to finish this set, because of the server error
            return True
Example #16
    def run(self):
        """run core tests"""
        if self.cases_queue is None:
            return
        total_count = len(self.cases_queue)
        current_idx = 0
        manual_skip_all = False
        result_list = []

        global TEST_SERVER_STATUS, TEST_SERVER_RESULT
        LOCK_OBJ.acquire()
        TEST_SERVER_RESULT = {"cases": []}
        TEST_SERVER_STATUS = {"finished": 0}
        LOCK_OBJ.release()
        for test_case in self.cases_queue:
            current_idx += 1
            expected_result = "0"
            core_cmd = ""
            time_out = None
            measures = []
            retmeasures = []
            if "entry" in test_case:
                core_cmd = test_case["entry"]
            else:
                LOGGER.info(
                    "[ Warnning: test script is empty,"
                    " please check your test xml file ]")
                continue
            if "expected_result" in test_case:
                expected_result = test_case["expected_result"]
            if "timeout" in test_case:
                time_out = int(test_case["timeout"])
            if "measures" in test_case:
                measures = test_case["measures"]

            LOGGER.info("\n[case] execute case:\nTestCase: %s\nTestEntry: %s\n"
                        "Expected Result: %s\nTotal: %s, Current: %s" % (
                        test_case['case_id'], test_case['entry'],
                        expected_result, total_count, current_idx))
            LOGGER.info("[ execute test script,"
                        "this might take some time, please wait ]")
            strtime = datetime.now().strftime(DATE_FORMAT_STR)
            LOGGER.info("start time: %s" % strtime)
            test_case["start_at"] = strtime
            if self.exetype == 'auto':
                return_code, stdout, stderr = shell_command_ext(
                    cmd=core_cmd, timeout=time_out, boutput=False)
                if return_code is not None:
                    actual_result = str(return_code)
                    if actual_result == "timeout":
                        test_case["result"] = "BLOCK"
                        test_case["stdout"] = "none"
                        test_case["stderr"] = "none"
                    else:
                        if actual_result == expected_result:
                            test_case["result"] = "pass"
                        else:
                            test_case["result"] = "fail"
                        test_case["stdout"] = stdout
                        test_case["stderr"] = stderr

                        for item in measures:
                            ind = item['name']
                            fname = item['file']
                            if fname and os.path.exists(fname):
                                try:
                                    config = ConfigParser.ConfigParser()
                                    config.read(fname)
                                    item['value'] = config.get(ind, 'value')
                                    retmeasures.append(item)
                                except IOError as error:
                                    LOGGER.error(
                                        "[ Error: failed to parse value,"
                                        " error: %s ]\n" % error)
                        test_case["measures"] = retmeasures
                else:
                    test_case["result"] = "BLOCK"
                    test_case["stdout"] = "none"
                    test_case["stderr"] = "none"
            elif self.exetype == 'manual':
                # handle manual core cases
                try:
                    # log pre-condition info
                    if "pre_condition" in test_case:
                        LOGGER.info("\n****\nPre-condition: %s\n ****\n"
                                    % test_case[
                                    'pre_condition'])
                    # log step info
                    if "steps" in test_case:
                        for step in test_case['steps']:
                            LOGGER.info(
                                "********************\nStep Order: %s"
                                % step['order'])
                            LOGGER.info("Step Desc: %s" % step['step_desc'])
                            LOGGER.info(
                                "Expected: %s\n********************\n"
                                % step['expected'])
                    if manual_skip_all:
                        test_case["result"] = "N/A"
                    else:
                        while True:
                            test_result = raw_input(
                                '[ please input case result ] '
                                '(p^PASS, f^FAIL, b^BLOCK, n^Next, d^Done):')
                            if test_result.lower() == 'p':
                                test_case["result"] = "PASS"
                                break
                            elif test_result.lower() == 'f':
                                test_case["result"] = "FAIL"
                                break
                            elif test_result.lower() == 'b':
                                test_case["result"] = "BLOCK"
                                break
                            elif test_result.lower() == 'n':
                                test_case["result"] = "N/A"
                                break
                            elif test_result.lower() == 'd':
                                manual_skip_all = True
                                test_case["result"] = "N/A"
                                break
                            else:
                                LOGGER.info(
                                    "[ Warning: you input: '%s' is invalid,"
                                    " please try again ]" % test_result)
                except IOError as error:
                    LOGGER.error(
                        "[ Error: fail to get core manual test step,"
                        " error: %s ]\n" % error)
Example #17
def _web_test_exec(conn, server_url, test_web_app, exetype, cases_queue, result_obj):
    """function for running web tests"""
    exetype = exetype.lower()
    test_set_finished = False
    err_cnt = 0
    relaunch_cnt = 0
    for test_group in cases_queue:
        if test_set_finished:
            break

        ret = http_request(
            get_url(server_url, "/set_testcase"), "POST", test_group, 30)
        if ret is None:
            LOGGER.error(
                "[ set testcases timeout, please check device! ]")
            result_obj.set_status(1)
            break

        if not conn.launch_app(test_web_app):
            result_obj.set_status(1)
            break

        while True:
            if result_obj.get_status() == 1:
                test_set_finished = True
                break
            ret = http_request(
                get_url(server_url, "/check_server_status"), "GET", {})
            if ret is None:
                err_cnt += 1
                if err_cnt >= CNT_RETRY:
                    LOGGER.error(
                        "[ check server status time out, please check deivce! ]")
                    test_set_finished = True
                    result_obj.set_status(1)
                    break
            else:
                result_cases = ret.get("cases")
                error_code = ret.get("error_code")
                if error_code is not None:
                    if not conn.launch_app(test_web_app):
                        test_set_finished = True
                        result_obj.set_status(1)
                        break
                    if error_code == LAUNCH_ERROR:
                        relaunch_cnt += 1
                        if relaunch_cnt >= 3:
                            test_set_finished = True
                            result_obj.set_status(1)
                            break
                    elif error_code == BLOCK_ERROR:
                        relaunch_cnt = 0
                else:
                    err_cnt = 0
                    relaunch_cnt = 0

                if result_cases:
                    result_obj.extend_result(result_cases)
                elif exetype == 'manual':
                    LOGGER.info(
                        "[ please execute manual cases ]\r\n")

                if ret["finished"] == 1:
                    test_set_finished = True
                    result_obj.set_status(1)
                    break
                elif ret["block_finished"] == 1:
                    break
            time.sleep(2)
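http_request and get_url are imported helpers; a rough Python 2 sketch of the contract the loop relies on (assumptions: JSON in, JSON out, None on any failure):

import json
import urllib2

def get_url(server_url, path):
    """Join the stub server base URL and a request path."""
    return server_url.rstrip('/') + path

def http_request(url, method="GET", data=None, timeout=10):
    """Send JSON to the stub; return the decoded reply, or None on error."""
    try:
        if method == "POST":
            req = urllib2.Request(url, json.dumps(data),
                                  {'Content-Type': 'application/json'})
        else:
            req = urllib2.Request(url)
        return json.loads(urllib2.urlopen(req, timeout=timeout).read())
    except Exception:
        return None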
Example #18
def _core_test_exec(conn, test_set_name, exetype, cases_queue, result_obj):
    """function for running core tests"""
    exetype = exetype.lower()
    total_count = len(cases_queue)
    current_idx = 0
    manual_skip_all = False
    result_list = []
    for test_case in cases_queue:
        if result_obj.get_status() == 1:
            break

        current_idx += 1
        core_cmd = ""
        if "entry" in test_case:
            core_cmd = test_case["entry"]
        else:
            LOGGER.info(
                "[ Warnning: test script is empty,"
                " please check your test xml file ]")
            continue
        expected_result = test_case.get('expected_result', '0')
        time_out = int(test_case.get('timeout', '90'))
        measures = test_case.get('measures', [])
        retmeasures = []
        LOGGER.info("\n[core test] execute case:\nTestCase: %s\n"
                    "TestEntry: %s\nExpected: %s\nTotal: %s, Current: %s"
                    % (test_case['case_id'], test_case['entry'],
                       expected_result, total_count, current_idx))
        LOGGER.info("[ execute core test script, please wait ! ]")
        strtime = datetime.now().strftime(DATE_FORMAT_STR)
        LOGGER.info("start time: %s" % strtime)
        test_case["start_at"] = strtime
        if exetype == 'auto':
            return_code, stdout, stderr = conn.shell_cmd_ext(
                core_cmd, time_out, False)
            if return_code is not None and return_code != "timeout":
                test_case["result"] = "pass" if str(
                    return_code) == expected_result else "fail"
                test_case["stdout"] = stdout
                test_case["stderr"] = stderr
                for item in measures:
                    ind = item['name']
                    fname = item['file']
                    if fname is None:
                        continue
                    tmpname = os.path.expanduser("~") + os.sep + "mea_tmp"
                    if conn.download_file(fname, tmpname):
                        try:
                            config = ConfigParser.ConfigParser()
                            config.read(tmpname)
                            item['value'] = config.get(ind, 'value')
                            retmeasures.append(item)
                            os.remove(tmpname)
                        except IOError as error:
                            LOGGER.error(
                                "[ Error: fail to parse value,"
                                " error:%s ]\n" % error)
                test_case["measures"] = retmeasures
            else:
                test_case["result"] = "BLOCK"
                test_case["stdout"] = stdout
                test_case["stderr"] = stderr
        elif exetype == 'manual':
            # handle manual core cases
            try:
                # log pre-condition info
                if "pre_condition" in test_case:
                    LOGGER.info("\n****\nPre-condition: %s\n ****\n"
                                % test_case['pre_condition'])
                # log step info
                if "steps" in test_case:
                    for step in test_case['steps']:
                        LOGGER.info(
                            "********************\n"
                            "Step Order: %s" % step['order'])
                        LOGGER.info("Step Desc: %s" % step['step_desc'])
                        LOGGER.info(
                            "Expected: %s\n********************\n"
                            % step['expected'])
                if manual_skip_all:
                    test_case["result"] = "N/A"
                else:
                    while True:
                        test_result = raw_input(
                            '[ please input case result ]'
                            ' (p^PASS, f^FAIL, b^BLOCK, n^Next, d^Done):')
                        if test_result.lower() == 'p':
                            test_case["result"] = "PASS"
                            break
                        elif test_result.lower() == 'f':
                            test_case["result"] = "FAIL"
                            break
                        elif test_result.lower() == 'b':
                            test_case["result"] = "BLOCK"
                            break
                        elif test_result.lower() == 'n':
                            test_case["result"] = "N/A"
                            break
                        elif test_result.lower() == 'd':
                            manual_skip_all = True
                            test_case["result"] = "N/A"
                            break
                        else:
                            LOGGER.info(
                                "[ Warnning: you input: '%s' is invalid,"
                                " please try again ]" % test_result)
            except IOError as error:
                LOGGER.error(
                    "[ Error: fail to get core manual test step,"
                    " error: %s ]\n" % error)
        strtime = datetime.now().strftime(DATE_FORMAT_STR)
        LOGGER.info("end time: %s" % strtime)
        test_case["end_at"] = strtime
        LOGGER.info("Case Result: %s" % test_case["result"])
        result_list.append(test_case)

    result_obj.extend_result(result_list, False)
    result_obj.set_status(1)
Example #19
    def run(self):
        """run core tests"""
        if self.cases_queue is None:
            return
        total_count = len(self.cases_queue)
        current_idx = 0
        manual_skip_all = False
        global TEST_SERVER_STATUS, TEST_SERVER_RESULT
        LOCK_OBJ.acquire()
        TEST_SERVER_RESULT = {"cases": []}
        TEST_SERVER_STATUS = {"finished": 0}
        result_list = []

        LOCK_OBJ.release()
        for test_case in self.cases_queue:
            current_idx += 1
            expected_result = "0"
            core_cmd = ""
            time_out = None
            measures = []
            retmeasures = []
            if "entry" in test_case:
                core_cmd = "sdb -s %s shell '%s ;  echo returncode=$?'" % (self.device_id, test_case["entry"])
            else:
                LOGGER.info("[ Warnning: test script is empty," " please check your test xml file ]")
                continue
            if "expected_result" in test_case:
                expected_result = test_case["expected_result"]
            if "timeout" in test_case:
                time_out = int(test_case["timeout"])
            if "measures" in test_case:
                measures = test_case["measures"]
            LOGGER.info(
                "\n[case] execute case:\nTestCase: %s\nTestEntry: %s\n"
                "Expected Result: %s\nTotal: %s, Current: %s"
                % (test_case["case_id"], test_case["entry"], expected_result, total_count, current_idx)
            )
            LOGGER.info("[ execute test script," "this might take some time, please wait ]")

            strtime = datetime.now().strftime(DATE_FORMAT_STR)
            LOGGER.info("start time: %s" % strtime)
            test_case["start_at"] = strtime
            if self.exetype == "auto":
                return_code, stdout, stderr = shell_command_ext(core_cmd, time_out, False)
                if return_code is not None:
                    actual_result = str(return_code)
                    if actual_result == "timeout":
                        test_case["result"] = "BLOCK"
                        test_case["stdout"] = "none"
                        test_case["stderr"] = "none"
                    else:
                        if actual_result == expected_result:
                            test_case["result"] = "pass"
                        else:
                            test_case["result"] = "fail"
                        test_case["stdout"] = stdout
                        test_case["stderr"] = stderr

                        for item in measures:
                            ind = item["name"]
                            fname = item["file"]
                            if fname is None:
                                continue
                            tmpname = os.path.expanduser("~") + os.sep + "measure_tmp"
                            if _download_file(self.device_id, fname, tmpname):
                                try:
                                    config = ConfigParser.ConfigParser()
                                    config.read(tmpname)
                                    item["value"] = config.get(ind, "value")
                                    retmeasures.append(item)
                                    os.remove(tmpname)
                                except IOError as error:
                                    LOGGER.error("[ Error: fail to parse value, error: %s ]\n" % error)
                        test_case["measures"] = retmeasures
                else:
                    test_case["result"] = "BLOCK"
                    test_case["stdout"] = "none"
                    test_case["stderr"] = "none"
            elif self.exetype == "manual":
                # handle manual core cases
                try:
                    # log pre-condition info
                    if "pre_condition" in test_case:
                        LOGGER.info("\n****\nPre-condition: %s\n ****\n" % test_case["pre_condition"])
                    # log step info
                    if "steps" in test_case:
                        for step in test_case["steps"]:
                            LOGGER.info("********************\n" "Step Order: %s" % step["order"])
                            LOGGER.info("Step Desc: %s" % step["step_desc"])
                            LOGGER.info("Expected: %s\n********************\n" % step["expected"])
                    if manual_skip_all:
                        test_case["result"] = "N/A"
                    else:
                        while True:
                            test_result = raw_input(
                                "[ please input case result ]" " (p^PASS, f^FAIL, b^BLOCK, n^Next, d^Done):"
                            )
                            if test_result.lower() == "p":
                                test_case["result"] = "PASS"
                                break
                            elif test_result.lower() == "f":
                                test_case["result"] = "FAIL"
                                break
                            elif test_result.lower() == "b":
                                test_case["result"] = "BLOCK"
                                break
                            elif test_result.lower() == "n":
                                test_case["result"] = "N/A"
                                break
                            elif test_result.lower() == "d":
                                manual_skip_all = True
                                test_case["result"] = "N/A"
                                break
                            else:
                                LOGGER.info(
                                    "[ Warnning: you input: '%s' is invalid," " please try again ]" % test_result
                                )
                except IOError as error:
                    LOGGER.error("[ Error: fail to get core manual test step, error: %s ]\n" % error)
Example #20
    def merge_resultfile(self, start_time, latest_dir):
        """ merge_result_file """
        mergefile = mktemp(suffix='.xml', prefix='tests.', dir=latest_dir)
        mergefile = os.path.splitext(mergefile)[0]
        mergefile = os.path.splitext(mergefile)[0]
        mergefile = "%s.result" % BASENAME(mergefile)
        mergefile = "%s.xml" % mergefile
        mergefile = JOIN(latest_dir, mergefile)
        end_time = datetime.today().strftime("%Y-%m-%d_%H_%M_%S")
        LOGGER.info("\n[ test complete at time: %s ]" % end_time)
        LOGGER.debug("[ start merging test result xml files, "\
            "this might take some time, please wait ]")
        LOGGER.debug("[ merge result files into %s ]" % mergefile)
        root = etree.Element('test_definition')
        root.tail = "\n"
        totals = set()

        # merge result files
        resultfiles = self.resultfiles
        totals = self.__merge_result(resultfiles, totals)

        for total in totals:
            result_xml = etree.parse(total)
            for suite in result_xml.getiterator('suite'):
                if suite.getiterator('testcase'):
                    suite.tail = "\n"
                    root.append(suite)
        # print test summary
        self.__print_summary()
        # generate actual xml file
        LOGGER.info("[ generate result xml: %s ]" % mergefile)
        if self.skip_all_manual:
            LOGGER.info("[ some results of core manual cases are N/A,"
                        "please refer to the above result file ]")
        LOGGER.info("[ merge complete, write to the result file,"
                    " this might take some time, please wait ]")
        # get useful info for xml
        # add environment node
        # add summary node
        root.insert(0, get_summary(start_time, end_time))
        root.insert(0, self.__get_environment())
        # add XSL support to testkit-lite
        declaration_text = """<?xml version="1.0" encoding="UTF-8"?>
        <?xml-stylesheet type="text/xsl" href="testresult.xsl"?>\n"""
        try:
            with open(mergefile, 'w') as output:
                output.write(declaration_text)
                tree = etree.ElementTree(element=root)
                tree.write(output, xml_declaration=False, encoding='utf-8')
        except IOError as error:
            LOGGER.error(
                "[ Error: merge result file failed, error: %s ]" % error)
        # change &lt;![CDATA[]]&gt; to <![CDATA[]]>
        replace_cdata(mergefile)
        # copy result to -o option
        try:
            if self.resultfile:
                if os.path.splitext(self.resultfile)[-1] == '.xml':
                    if not os.path.exists(os.path.dirname(self.resultfile)):
                        if len(os.path.dirname(self.resultfile)) > 0:
                            os.makedirs(os.path.dirname(self.resultfile))
                    LOGGER.info("[ copy result xml to output file:"
                                " %s ]" % self.resultfile)
                    copyfile(mergefile, self.resultfile)
                else:
                    LOGGER.info(
                        "[ Please specify and xml file for result output,"
                        " not:%s ]" % self.resultfile)
        except IOError as error:
            LOGGER.error("[ Error: fail to copy the result file to: %s,"
                         " please check if you have created its parent directory,"
                         " error: %s ]" % (self.resultfile, error))
Example #21
    def __prepare_external_test_json(self, resultfile):
        """Run external test"""
        parameters = {}
        xml_set_tmp = resultfile
        # split set_xml by <case> get case parameters
        LOGGER.debug("[ split xml: %s by <case> ]" % xml_set_tmp)
        LOGGER.debug("[ this might take some time, please wait ]")
        try:
            parse_tree = etree.parse(xml_set_tmp)
            root_em = parse_tree.getroot()
            case_tmp = []
            for tset in root_em.getiterator('set'):
                case_order = 1
                parameters.setdefault(
                    "casecount", str(len(tset.getiterator('testcase')))
                )
                parameters.setdefault("current_set_name", xml_set_tmp)

                for tcase in tset.getiterator('testcase'):
                    case_detail_tmp = {}
                    step_tmp = []
                    parameters.setdefault(
                        "exetype", tcase.get('execution_type')
                    )

                    parameters.setdefault("type", tcase.get('type'))
                    case_detail_tmp.setdefault("case_id", tcase.get('id'))
                    case_detail_tmp.setdefault("purpose", tcase.get('purpose'))
                    case_detail_tmp.setdefault("order", str(case_order))
                    case_detail_tmp.setdefault("onload_delay", "3")

                    entry_node = tcase.find('description/test_script_entry')
                    if entry_node is not None:
                        tc_entry = entry_node.text or ""
                        case_detail_tmp["entry"] = self.test_prefix + tc_entry
                        if entry_node.get('timeout'):
                            case_detail_tmp["timeout"] = entry_node.get(
                                'timeout')
                        if entry_node.get('test_script_expected_result'):
                            case_detail_tmp["expected_result"] = \
                                entry_node.get('test_script_expected_result')
                    for this_step in tcase.getiterator("step"):
                        step_detail_tmp = {}
                        step_detail_tmp.setdefault("order", "1")
                        step_detail_tmp["order"] = str(this_step.get('order'))

                        if this_step.find("step_desc") is not None:
                            text = this_step.find("step_desc").text
                            if text is not None:
                                step_detail_tmp["step_desc"] = text

                        if this_step.find("expected") is not None:
                            text = this_step.find("expected").text
                            if text is not None:
                                step_detail_tmp["expected"] = text

                        step_tmp.append(step_detail_tmp)

                    case_detail_tmp['steps'] = step_tmp

                    if tcase.find('description/pre_condition') is not None:
                        text = tcase.find('description/pre_condition').text
                        if text is not None:
                            case_detail_tmp["pre_condition"] = text

                    if tcase.find('description/post_condition') is not None:
                        text = tcase.find('description/post_condition').text
                        if text is not None:
                            case_detail_tmp['post_condition'] = text

                    if tcase.get('onload_delay') is not None:
                        case_detail_tmp[
                            'onload_delay'] = tcase.get('onload_delay')
                    # Check performance test
                    if tcase.find('measurement') is not None:
                        measures = tcase.getiterator('measurement')
                        measures_array = []
                        for measure in measures:
                            measure_json = {}
                            measure_json['name'] = measure.get('name')
                            measure_json['file'] = measure.get('file')
                            measures_array.append(measure_json)
                        case_detail_tmp['measures'] = measures_array
                    case_tmp.append(case_detail_tmp)
                    case_order += 1
            parameters.setdefault("cases", case_tmp)
            if self.bdryrun:
                parameters.setdefault("dryrun", True)
            self.set_parameters = parameters

        except IOError as error:
            LOGGER.error("[ Error: fail to prepare cases parameters, "
                         "error: %s ]\n" % error)
            return False
        return True
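For reference, the parameters dictionary assembled above ends up shaped roughly like this (all values illustrative):

set_parameters = {
    "casecount": "2",
    "current_set_name": "tests.auto.suite_1.xml",
    "exetype": "auto",
    "type": "functional",
    "cases": [
        {
            "case_id": "TC01",
            "purpose": "sample purpose",
            "order": "1",
            "onload_delay": "3",
            "entry": "/opt/tests/index.html?testId=TC01",
            "timeout": "90",
            "expected_result": "0",
            "steps": [{"order": "1",
                       "step_desc": "open the page",
                       "expected": "page loads"}],
        },
    ],
}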