Exemplo n.º 1
0
 def get_values_for_optional_args(self, arg_kv):
     """The values for optional arguments as a python dictionary

     Fills ``arg_kv`` with a value for every name in
     ``self.optional_args_list`` and returns it.

     Lookup precedence per argument: args_repository, then
     data_repository, then the keyword default from default_dict.
     A second pass resolves credential values against 'system_name'
     when an input data file is in use.

     :Arguments:
         1. arg_kv (dict) = dictionary to populate with argument values

     :Returns:
         1. arg_kv (dict) = the same dictionary, populated
     """
     print_debug("getting values for optional arguments")
     for args in self.optional_args_list:
         if args in self.args_repository:
             arg_kv[args] = self.args_repository[args]
         elif args in self.data_repository:
             arg_kv[args] = self.data_repository[args]
         else:
             # neither repository has it -- fall back to the keyword default
             arg_kv[args] = self.default_dict[args]
             print_debug("executing with default value '{0}' for optional "
                         "argument '{1}'".format(arg_kv[args], args))
     for args in self.optional_args_list:
         # requires another loop since system_name may not be at beginning
         if args != 'system_name' and 'system_name' in arg_kv and self.data_repository[
                 'wt_datafile'] != 'NO_DATA':
             # the args can be direct values or mentioned as
             # wtag var (except system_name) like 'wtag=<wtag var>',
             # which would be fetched from the input data file
             value = self.get_credential_value(arg_kv[args],
                                               arg_kv['system_name'])
             if value is not None:
                 arg_kv[args] = value
     else:
         # NOTE(review): for-else with no break in the loop above -- this
         # else clause ALWAYS executes, so the tag_dict cleanup below is
         # unconditional; confirm the for-else was intentional.
         if hasattr(self, 'tag_dict'):
             del self.tag_dict
     return arg_kv
    def check_get_datatype(self, datafile):
        """Determine the datatype to use for the testcase.

        Reads the Details/Datatype tag from the testcase file. Falls back
        to 'CUSTOM' when there is no input data file, when the tag is
        missing/empty, or when an unsupported value was provided.
        """
        data_type = xml_Utils.getChildTextbyParentTag(self.filepath, 'Details',
                                                      'Datatype')
        # no input data file at all -> datatype is forced to CUSTOM
        if str(datafile).upper().strip() == 'NO_DATA':
            print_debug('This test case will be run without any InputDataFile')
            return 'CUSTOM'

        # tag absent or empty -> default to CUSTOM
        if data_type is None or data_type is False or \
                str(data_type).strip() == "":
            return 'CUSTOM'

        data_type = str(data_type).strip()
        supported_values = ['iterative', 'custom', 'hybrid']
        if data_type.lower() not in supported_values:
            print_warning("unsupported value '{0}' provided for data_type,"
                          " supported values are "
                          "'{1}' and case-insensitive".format(data_type, supported_values))
            print_info(
                "Hence using default value for data_type which is 'custom'"
            )
            data_type = 'CUSTOM'
        return data_type
def report_testcase_result(tc_status, data_repository, tag="Steps"):
    """Report the testcase result to the result file

    Prints the overall testcase status and duration, records the result in
    the testcase result file, then prints a summary table of every keyword
    that neither passed nor ran.

    :Arguments:
        1. tc_status (bool) = status of the executed testcase
        2. data_repository (dict) = data_repository of the executed  testcase
        3. tag (str) = label prefixed to each keyword name in the summary
    """
    print_info("**** Testcase Result ***")
    tc_duration = Utils.data_Utils.get_object_from_datarepository("tc_duration")
    print_info("TESTCASE:{0}  STATUS:{1} | Duration = {2}".format(
        data_repository['wt_name'], convertLogic(tc_status), tc_duration))
    print_debug("\n")
    Utils.testcase_Utils.pTestResult(tc_status, data_repository['wt_resultfile'])
    root = Utils.xml_Utils.getRoot(data_repository['wt_resultfile'])
    failed_so_far = 0
    for kw_elem in root.findall('Keyword'):
        kw_status = kw_elem.find('KeywordStatus').text
        if kw_status in ("PASS", "RAN"):
            continue
        failed_so_far += 1
        kw_name = kw_elem.find('Name').text
        step_num = ','.join(list(kw_elem.attrib.values()))
        if failed_so_far == 1:
            # print the table header once, just before the first failed row
            print_info("++++++++++++++++++++++++ Summary of Failed Keywords +++++++++++++++++++"
                       "+++++")
            print_info("{0:15} {1:60} {2:10}".format('StepNumber', 'KeywordName', 'Status'))
        print_info("{0:15} {1:60} {2:10}".format(str(step_num), tag+"-"+str(kw_name),
                                                 str(kw_status)))
    print_info("=================== END OF TESTCASE ===========================")
Exemplo n.º 4
0
def execute_iterative_parallel(step_list, data_repository, tc_status,
                               system_list):
    """Takes a list of steps as input and executes them in parallel by
    creating separate process of step_driver for each of these steps

    :Arguments:
        1. step_list (list) = steps to execute on each system
        2. data_repository (dict) = testcase data repository
        3. tc_status (bool) = incoming testcase status (recomputed here)
        4. system_list (list) = systems on which to run the steps in parallel

    :Returns:
        1. tc_status (bool) = status computed across all systems
    """

    jobs_list = []
    output_q = None
    # Fix: 'process' was only bound inside the loop; initialize it so an
    # empty system_list cannot raise NameError at the print below.
    process = None
    for system_name in system_list:
        target_module = testcase_steps_execution.main
        args_dict = OrderedDict([
            ("step_list", step_list),
            ("data_repository", data_repository),
            ("system_name", system_name),
            ("kw_parallel", True),
            ("output_q", output_q),
        ])

        process, jobs_list, output_q = create_and_start_process_with_queue(
            target_module, args_dict, jobs_list, output_q)

    print_debug("process: {0}".format(process))
    # wait for every spawned step-driver process to finish
    for job in jobs_list:
        job.join()

    result_list = get_results_from_queue(output_q)

    system_status_list = []
    system_resultfile_list = []
    tc_junit_list = []

    for result in result_list:
        # each result: (step_status_list, kw_resultfile_list, system_name,
        #               step_impact_list, tc_junit_object)
        step_status_list = result[0]
        kw_resultfile_list = result[1]
        system_name = result[2]
        step_impact_list = result[3]
        tc_junit_list.append(result[4])
        system_status = testcase_Utils.compute_status_using_impact(
            step_status_list, step_impact_list)
        system_resultfile = testcase_Utils.compute_system_resultfile(
            kw_resultfile_list, data_repository['wt_resultsdir'], system_name)
        system_status_list.append(system_status)
        system_resultfile_list.append(system_resultfile)

    tc_status = Utils.testcase_Utils.compute_status_without_impact(
        system_status_list)
    # parallel keywords generate multiple keyword junit result files;
    # each file logs the result for one keyword and is not integrated --
    # update the testcase junit result file with the individual files
    data_repository['wt_junit_object'] = update_tc_junit_resultfile(
        data_repository['wt_junit_object'], tc_junit_list,
        data_repository['wt_tc_timestamp'])
    print_debug("Updating Testcase result file...")
    Utils.testcase_Utils.append_result_files(data_repository['wt_resultfile'],
                                             system_resultfile_list)

    return tc_status
Exemplo n.º 5
0
 def _compute_testcase_status(self):
     """Compute the overall testcase status from the per-system statuses
     and append the per-system result files to the testcase result file.
     """
     overall_status = testcase_Utils.compute_status_without_impact(
         self.system_status_list)
     print_debug("Updating Testcase result file...")
     testcase_Utils.append_result_files(
         self.data_repository['wt_resultfile'], self.system_resultfile_list)
     return overall_status
    def convert_arg_to_datatype(self):
        """Parses the input argument to find the data type requested by the user

        This is based on the starting letters of the argument name
        If arg_name starts with:
        1. str_ = string
        2. int_ = integer
        3. float_ = float
        4. bool_ = boolean
        5. list_ = list
        6. tuple_ = tuple
        7. dict_ = dict

        Default: if none of the above naming convention matches then the argument value
        will be treated as a string

        :Returns:
            1. the argument value converted via convert_string_to_datatype,
               or the raw string value for the str_/default/file-error cases
        """
        self.datatype = None
        if self.arg_name.startswith('str_'):
            # already a string -- return the raw value, no conversion needed
            return self.arg_value
        elif self.arg_name.startswith('int_'):
            self.datatype = int
        elif self.arg_name.startswith('float_'):
            self.datatype = float
        elif self.arg_name.startswith('bool_'):
            self.datatype = bool
            # normalize case so the later string-to-bool conversion
            # recognizes the literal
            if self.arg_value.lower() == "true":
                self.arg_value = "True"
            elif self.arg_value.lower() == "false":
                self.arg_value = "False"
        elif self.arg_name.startswith('list_'):
            self.datatype = list
        elif self.arg_name.startswith('tuple_'):
            self.datatype = tuple
        elif self.arg_name.startswith('dict_'):
            self.datatype = dict
        elif self.arg_name.startswith('file_'):
            # file_ arguments: resolve the path relative to the testcase
            # path and replace arg_value with an open file handle.
            # NOTE(review): the handle is not closed here -- presumably the
            # consumer of arg_value is responsible; confirm.
            self.datatype = IOBase
            tc_path = config_Utils.tc_path
            fname = file_Utils.getAbsPath(self.arg_value, tc_path)
            try:
                self.arg_value = open(fname)
            except IOError:
                print_warning("given file {} does not exist, please check, it "
                              "should be relative to testcase path {}".format(
                                  fname, tc_path))
        else:
            # User has not specified any data type with the argument, but it can be
            # given a proper type through wtag or will be treated as string (default)
            return self.arg_value
        if self.datatype is not None:
            convert_msg = "Input argument {0} will be converted to a {1}".format(
                self.arg_name, self.datatype)
            print_debug(convert_msg)

        result = self.convert_string_to_datatype()
        return result
def execute_iterative_parallel_testcases(system_list, testcase_list, suite_repository,
                                         data_repository, from_project, tc_parallel=True,
                                         auto_defects=False):
    """Takes a list of systems as input and executes the testcases in parallel by
    creating separate process of testcase_driver for each of these systems

    :Arguments:
        1. system_list (list) = systems to run the testcases against
        2. testcase_list (list) = testcases to execute per system
        3. suite_repository (dict) = repository of the enclosing testsuite
        4. data_repository (dict) = data repository for the execution
        5. from_project (bool) = whether execution was started from a project
        6. tc_parallel (bool) = run the testcases in parallel (default True)
        7. auto_defects (bool) = automatically raise defects on failure

    :Returns:
        1. testsuite_status = status computed from all testcase statuses/impacts
    """

    jobs_list = []
    output_q = None
    # Fix: 'process' was only bound inside the loop; initialize it so an
    # empty system_list cannot raise NameError at the print below.
    process = None

    for system in system_list:
        target_module = sequential_testcase_driver.main
        tc_args_dict = OrderedDict([("testcase_list", testcase_list),
                                    ("suite_repository", suite_repository),
                                    ("data_repository", data_repository),
                                    ("from_project", from_project),
                                    ("auto_defects", auto_defects),
                                    ("system", system),
                                    ("tc_parallel", tc_parallel),
                                    ("output_q", output_q),
                                    ("ts_iter", True)
                                    ])

        process, jobs_list, output_q = create_and_start_process_with_queue(
            target_module, tc_args_dict, jobs_list, output_q)

    print_debug("process: {0}".format(process))
    # wait for every spawned testcase-driver process to finish
    for job in jobs_list:
        job.join()

    result_list = get_results_from_queue(output_q)

    tc_status_list = []
    tc_name_list = []
    tc_impact_list = []
    tc_duration_list = []
    # Get the junit object of each testcase, extract the information from it
    # and combine with testsuite junit object
    tc_junit_list = []

    # Suite results
    for result in result_list:
        # Case results
        for val in range(len(result[0])):
            tc_status_list.append(result[0][val])
            # NOTE(review): result[1] is appended as-is for every case,
            # unlike the other per-case fields which index with [val] --
            # confirm it should not be result[1][val].
            tc_name_list.append(result[1])
            tc_impact_list.append(result[2][val])
            tc_duration_list.append(result[3][val])
        tc_junit_list.append(result[4])
    # parallel testcases generate multiple testcase junit result files;
    # each file logs the result for one testcase and is not integrated --
    # update testsuite junit result file with individual testcase result files
    update_ts_junit_resultfile(suite_repository['wt_junit_object'],
                               tc_junit_list, data_repository['wt_ts_timestamp'])
    testsuite_status = Utils.testcase_Utils.compute_status_using_impact(tc_status_list,
                                                                        tc_impact_list)
    return testsuite_status
Exemplo n.º 8
0
 def p_subkeyword(self, keyword_txt):
     """ Creates a Keyword tag as the child node to the <SubStep> tag """
     self.gsubkeyloop += 1
     print_debug("***************Sub-Keyword: %s "
                 "***************" % keyword_txt)
     # attach the new Keyword element under the current step element
     keyword_node = ET.SubElement(self.gstep[self.gsteploop], "Keyword")
     keyword_node.text = keyword_txt
     self.gsubkey[self.gsubkeyloop] = keyword_node
     self.current_pointer = keyword_node
     self.print_output()
Exemplo n.º 9
0
    def p_substep(self, substep_txt=""):
        """Create a substep tag"""
        self.gsubsteploop += 1
        # attach the new SubStep element under the current step element
        substep_node = ET.SubElement(self.gstep[self.gsteploop], "SubStep")
        substep_node.text = substep_txt
        self.gsubstep[self.gsubsteploop] = substep_node
        self.current_pointer = substep_node
        print_debug("<< Substep >>")
        print_debug("Keyword Description: {0}".format(substep_txt))

        self.print_output()
Exemplo n.º 10
0
 def p_fail(self, level, text=""):
     """Report a failure at the given level to the result file."""
     kw_duration = Utils.data_Utils.get_object_from_datarepository(
         "kw_duration")
     # include the duration only for keyword-level failure messages
     show_duration = "KEYWORD" in text and kw_duration
     if show_duration:
         msg = "{0} STATUS:FAIL | Duration = {1}".format(text, kw_duration)
         print_info(msg)
     else:
         print_debug("{0} STATUS:FAIL".format(text))
     self.p_status("FAIL", level)
def get_step_console_log(filename, logsdir, console_name):
    """Create a separate console logfile for a step during parallel
    execution, register it as the debug file, and return its path."""
    console_logfile = Utils.file_Utils.getCustomLogFile(filename, logsdir,
                                                        console_name)
    print_debug("************ This is parallel execution ************")
    print_info("... console logs for {0} will be logged in {1} ".format(
        console_name, console_logfile))
    Utils.config_Utils.debug_file(console_logfile)
    return console_logfile
Exemplo n.º 12
0
    def testcase_prerun(self, tc_filepath, check_files_dict=None):
        """Executes prerun of a testcase file

        Validates the testcase xml against the warrior testcase xsd and,
        when valid, also verifies the input data file and the Steps
        section. Prints the resulting status and returns it.
        """
        print_debug('\n')
        print_debug('=' * 40)
        print_debug("Validating Test case xml")
        print_debug('=' * 40)

        testcase_xsd_fullpath = self.xsd_dir + os.sep + 'warrior_testcase.xsd'
        tc_status = self.xml_to_xsd_validation(tc_filepath,
                                               testcase_xsd_fullpath)
        if not tc_status:
            print_error("Incorrect xml format")
        else:
            # both checks always run; either one failing fails the prerun
            tc_status &= self.check_tc_input_datafile(tc_filepath,
                                                      check_files_dict)
            tc_status &= self.check_steps(tc_filepath)
        # NOTE: fixed pause kept from the original implementation
        time.sleep(5)
        status = testcase_Utils.convertLogic(tc_status)
        print_info('TC STATUS: {0}ED'.format(status))

        return tc_status
    def ssh_con(self,
                retries=1,
                interval=1,
                timeout=60,
                verify_keys=False,
                invoke_shell=False):
        """Connects to the host using ssh object

        :Arguments:
            1. retries =  No of attempts before returning fail
            2. interval = Time to wait before the next retry
            3. timeout = wait for response
            4. verify_keys = Verify the host entry is available in host key
            5. invoke_shell = open an interactive shell after connecting

        :Returns:
            1. status(bool)= True / False

        Note: an SSHException aborts immediately without using the
        remaining retries; any other exception sleeps for 'interval'
        seconds and retries.
        """
        for attempt in range(retries):
            print_debug("Attempt{} connecting to {}".format(
                attempt + 1, self.target))
            try:
                if not verify_keys:
                    # accept unknown hosts automatically when host key
                    # verification is disabled
                    self.sshobj.set_missing_host_key_policy(\
                                                self.param.AutoAddPolicy())
                self.sshobj.connect(self.target,
                                    self.port,
                                    self.uid,
                                    self.pid,
                                    timeout=timeout,
                                    look_for_keys=verify_keys)
                if invoke_shell:
                    print_info("Opening shell for {}".format(self.sshobj))
                    self.sshobj.invoke_shell()

                # NOTE(review): the log handle stays open on self.log --
                # presumably closed elsewhere when the session ends; confirm.
                if self.logfile is not None:
                    self.log = open(self.logfile, 'w')
            except self.param.SSHException:
                # SSH-protocol-level failure: treated as fatal, no retry
                print_error(" ! could not connect to %s...check logs" %
                            self.target)
                return False
            except Exception as err:
                # any other failure: wait and retry
                print_error("Login failed {0}".format(str(err)))
                sleep(interval)
                continue
            else:
                print_info("Connected to the host")
                return True
        return False
Exemplo n.º 14
0
    def report_substep_status(self, status):
        """Report the status of a substep to the testcase xml result file.

        A more meaningfully named wrapper around report_warning: a True
        status reports the substep as passed, False raises a warning, and
        'Skip' reports the substep as skipped.

        :Arguments:
            1. status = (bool) True or False

        :Returns:
            None
        """
        print_debug("<< Substep status >>")
        self.report_warning(status)
def send_email(smtp_host, sender, receivers, subject, body, files):
    """ sends email from smtp server using input arguments:
    :Arguments:
        1. smtp_host - smtp host name
        2. sender - sender email ID
        3. receivers - receiver email ID(s), comma-separated string
        4. subject - email subject line
        5. body - email body (sent as HTML)
        6. files - files to be attached
    """
    if not smtp_host:
        print_debug("No smtp host defined in w_settings, no email sent")
        return
    if not receivers:
        print_debug("No receiver defined in w_settings, no email sent")
        return

    message = MIMEMultipart()
    message['From'] = sender
    message['To'] = receivers
    receivers_list = [receiver.strip() for receiver in receivers.split(',')]
    message['Subject'] = subject

    # HTML is used for better formatting of mail body
    part = MIMEText(body, 'html')
    message.attach(part)

    for attach_file in files or []:
        with open(attach_file, "rb") as fil:
            part = MIMEBase('application', 'octet-stream')
            part.set_payload((fil).read())
            encoders.encode_base64(part)
            part.add_header('Content-Disposition', "attachment;filename= %s"
                            % basename(attach_file))
            message.attach(part)

    try:
        smtp_obj = smtplib.SMTP(smtp_host)
        try:
            smtp_obj.sendmail(sender, receivers_list, message.as_string())
            pNote('Execution results emailed to receiver(s): {}'.format(receivers))
        finally:
            # Fix: close the connection even when sendmail raises;
            # previously the socket leaked on failure.
            smtp_obj.close()
    # Fix: narrowed from BaseException so KeyboardInterrupt/SystemExit
    # are no longer swallowed; also added the missing space after
    # "w_settings.xml" in the message.
    except Exception:
        pNote("Error occurred while sending email, check w_settings.xml "
              "configuration for correct smtp host, "
              "receiver email address etc.")
def execute_custom_sequential(step_list, data_repository, tc_status,
                              system_name):
    """Execute the given steps sequentially through the
    testcase_steps_execution driver and compute the testcase status from
    the step statuses and their impact values."""
    step_status_list, kw_resultfile_list, step_impact_list = \
        testcase_steps_execution.main(step_list, data_repository, system_name)

    tc_status = Utils.testcase_Utils.compute_status_using_impact(
        step_status_list, step_impact_list)

    print_debug("Updating Testcase result file...")
    Utils.testcase_Utils.append_result_files(
        data_repository['wt_resultfile'], kw_resultfile_list)

    return tc_status
Exemplo n.º 17
0
def execute_iterative_sequential(step_list, data_repository, tc_status, system_list):
    """ Executes all the steps in iterative sequential fashion """

    system_status_list = []
    system_resultfile_list = []

    for system in system_list:
        # run the full step list against this system and collect results
        status_list, resultfile_list, impact_list = testcase_steps_execution.main(
            step_list, data_repository, system_name=system)
        system_status_list.append(
            Utils.testcase_Utils.compute_status_using_impact(status_list,
                                                             impact_list))
        system_resultfile_list.append(
            compute_system_resultfile(resultfile_list,
                                      data_repository['wt_resultsdir'],
                                      system))

    tc_status = Utils.testcase_Utils.compute_status_without_impact(system_status_list)
    print_debug("Updating Testcase result file...")
    Utils.testcase_Utils.append_result_files(data_repository['wt_resultfile'],
                                             system_resultfile_list)

    return tc_status
Exemplo n.º 18
0
        def send_command(cls, *args, **kwargs):
            """Return a canned response for the command in args[3].

            The order of getting response match is:
            cmd specific response with simresp > global response with
            simresp > cmd specific response default > global response
            default.
            """
            pNote(":CMD: %s" % (args[3]))
            # in simulation mode resolve the response from the processed
            # response dict; otherwise (mock mode) reply with an empty string
            response = cmd_resp_lookup(args[3]) if WarCli.sim else ""
            print_debug("Response:\n{0}\n".format(response))
            return True, response
 def _report_step_as_not_run(self):
     """
     This function handles reporting of a step as not run.

     Marks the current step as skipped in the result and junit files,
     records 'SKIPPED' under 'step_<n>_result' in the data repository,
     then decides how the step loop should proceed based on the
     currently triggered onError action.

     :Returns:
         1. current_step_number (int) = possibly rewound step number
         2. go_to_step_number (bool) = always False here
         3. flow control keyword: "break" or "continue"
     """
     keyword = self.current_step.get('Keyword')
     kw_resultfile = step_driver.get_keyword_resultfile(
         self.data_repository, self.system_name, self.current_step_number,
         keyword)
     Utils.config_Utils.set_resultfile(kw_resultfile)
     Utils.testcase_Utils.pKeyword(keyword, self.current_step.get('Driver'))
     Utils.testcase_Utils.reportStatus('Skip')
     self.kw_resultfile_list.append(kw_resultfile)
     # bump both the skipped and the total keyword counters in the junit file
     self.data_repository['wt_junit_object'].update_count(
         "skipped", "1", "tc", self.data_repository['wt_tc_timestamp'])
     self.data_repository['wt_junit_object'].update_count(
         "keywords", "1", "tc", self.data_repository['wt_tc_timestamp'])
     kw_start_time = Utils.datetime_utils.get_current_timestamp()
     step_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
         self.current_step)
     impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
     self.data_repository['wt_junit_object'].add_keyword_result(
         self.data_repository['wt_tc_timestamp'], self.current_step_number,
         keyword, "SKIPPED", kw_start_time, "0", "skipped",
         impact_dict.get(step_impact.upper()), "N/A")
     self.data_repository['step_{}_result'.format(
         self.current_step_number)] = "SKIPPED"
     self.go_to_step_number = False
     # ABORT actions stop the step loop entirely
     if self.current_triggered_action.upper() in [
             'ABORT', 'ABORT_AS_ERROR'
     ]:
         return self.current_step_number, self.go_to_step_number, "break"
     elif self.current_triggered_action.upper() in ['SKIP', 'NEXT']:
         return self.current_step_number, self.go_to_step_number, "continue"
     elif self.current_triggered_action == "SKIP_INVOKED":
         if self.skip_invoked:
             print_debug("Skipping this step as it is an invoked step.")
             return self.current_step_number, self.go_to_step_number, "continue"
     # when 'onError:goto' value is less than the current step num,
     # change the next iteration point to goto value
     elif self.current_triggered_action and int(self.current_triggered_action) < \
             self.current_step_number:
         self.current_step_number = int(self.current_triggered_action) - 1
     return self.current_step_number, self.go_to_step_number, "continue"
Exemplo n.º 20
0
 def _get_element(self, browser, locator, **kwargs):
     """Locate and return the element matching the locator.

     Parses the locator into a strategy prefix and value, resolves the
     matching strategy function, and returns the element it finds
     (or None when the lookup raises NoSuchElementException).
     """
     findall = kwargs.get('findall', None)
     prefix, value = self._parse_locator(locator)
     if prefix is None:
         raise ValueError(("Strategy to find elements is "\
                           "not provided in the locator={0}".format(locator)))
     locator_function = self._get_strategy_function(prefix)
     if not locator_function:
         raise ValueError(("{0} in locator={1} is not a "\
                           "supported strategy to find elements.".format(prefix, locator)))
     element = None
     try:
         element = locator_function(value, browser, findall)
     except NoSuchElementException:
         # lookup failure is reported by returning None
         pass
     else:
         print_debug("Element found")
     return element
Exemplo n.º 21
0
    def open_browser(self,
                     browser_name='firefox',
                     webdriver_remote_url=False,
                     desired_capabilities=None,
                     **kwargs):
        """Open a browser session

        :Arguments:
            1. browser_name (str) = browser to launch (default 'firefox')
            2. webdriver_remote_url = remote webdriver server URL, or
               False to launch the browser locally
            3. desired_capabilities = capabilities forwarded to the driver
            4. kwargs = extra options; 'profile_dir' is extracted here,
               the rest are forwarded to the browser factory

        :Returns:
            1. browser object created by _make_browser
        """
        # pop() with a default replaces the previous get()+conditional-pop
        # pair; a redundant 'browser_name = browser_name' no-op was removed.
        profile_dir = kwargs.pop('profile_dir', None)

        if webdriver_remote_url:
            print_debug("Opening browser '{0}' through remote server at '{1}'"\
                        .format(browser_name, webdriver_remote_url))
        else:
            print_debug("Opening browser '%s'" % (browser_name))
        browser = self._make_browser(browser_name, desired_capabilities,
                                     profile_dir, webdriver_remote_url,
                                     **kwargs)
        return browser
Exemplo n.º 22
0
    def get_values_for_mandatory_args(self):
        """The values for mandatory arguments as a python dictionary

        Builds and returns a dict mapping each name in self.req_args_list
        to its value, looked up in args_repository first and then
        data_repository. 'system_name' is resolved first so the remaining
        values can be resolved as credentials for that system when an
        input data file is in use.

        :Returns:
            1. arg_kv (dict) = mandatory argument name -> value
        """
        def get_value(arg):
            """get the value for arg from args or data repository
            """
            if arg in self.args_repository:
                return self.args_repository[arg]
            if arg in self.data_repository:
                return self.data_repository[arg]
            # Fix: the message previously formatted the loop variable
            # 'args' (undefined when called for system_name) instead of
            # this function's own 'arg' parameter.
            print_error("value for mandatory argument '{0}' not available in "
                        "data_repository/args_repository".format(arg))
            return None

        print_debug("getting values for mandatory arguments")
        arg_kv = {}
        sysname = 'system_name'
        args_list = self.req_args_list[:]
        if sysname in args_list:
            arg_kv[sysname] = get_value(sysname)
            if arg_kv[sysname] is None:
                del arg_kv[sysname]
            args_list.remove(sysname)
        for args in args_list:
            value = get_value(args)
            if value is None:
                continue
            if sysname in arg_kv and self.data_repository[
                    'wt_datafile'] != 'NO_DATA':
                # the args can be direct values or mentioned as
                # wtag var (except system_name) like 'wtag=<wtag var>',
                # which would be fetched from the input data file
                value = self.get_credential_value(value, arg_kv[sysname])
                if value is not None:
                    arg_kv[args] = value
            else:
                arg_kv[args] = value
        return arg_kv
Exemplo n.º 23
0
    def search_for_match(cls, package_list, keyword, driver_name):
        """Searches for a method or function that matches the keyword
        and returns the matching list.
        Returns None when no match or duplicates are found."""

        drv_obj = kw_driver_class.ModuleOperations(package_list, keyword)

        matches = drv_obj.matching_method_list + drv_obj.matching_function_list
        match_count = len(matches)
        if match_count == 1:
            print_debug("Found one matching method/function for "
                        "keyword '{0}'".format(keyword))
            return matches
        if match_count == 0:
            print_warning("There is no matching keyword: '{0}' "
                          "for the Driver: '{1}'".format(keyword, driver_name))
        else:
            print_debug("More than one method/function of same name: '{0}' "
                        "exists for the Driver: '{1}'".format(keyword,
                                                              driver_name))
        return None
Exemplo n.º 24
0
    def xml_to_xsd_validation(file_xml, file_xsd):
        """ Verify that the XML compliance with XSD
        Arguments:
            1. file_xml: Input xml file
            2. file_xsd: xsd file which needs to be validated against xml
        Return:
            True when the xml validates against the schema,
            False on a parse error or schema violation
        """
        # NOTE(review): defined without self/cls -- presumably bound as a
        # staticmethod or used as a module-level helper; confirm.
        try:
            print_debug("Validating:{0}".format(file_xml))
            print_debug("xsd_file:{0}".format(file_xsd))
            xml_doc = parse(file_xml)
            xsd_doc = parse(file_xsd)
            xmlschema = XMLSchema(xsd_doc)
            # assert_ raises AssertionError when the doc violates the schema
            xmlschema.assert_(xml_doc)
            return True

        except XMLSyntaxError as err:
            # either input file failed to parse as XML
            print_error("PARSING ERROR:{0}".format(err))
            return False

        except AssertionError as err:
            print_error("Incorrect XML schema: {0}".format(err))
            return False
Exemplo n.º 25
0
    def _update_skip_results(self, step, system_name, step_num):
        """
         update_skip_results

        Records the given step as skipped: marks it 'Skip' in the testcase
        result file, bumps the junit 'skipped' and 'keywords' counters,
        adds a SKIPPED keyword result, and stores 'SKIPPED' under
        'step_<n>_result' in the data repository.

        :Arguments:
            1. step = step xml element
            2. system_name = system the step would have run on
            3. step_num = step number within the testcase

        :Returns:
            1. result (tuple) = ("Skip", kw_resultfile, None, None)
        """
        keyword = step.get('Keyword')
        kw_resultfile = step_driver.get_keyword_resultfile(
            self.data_repository, system_name, step_num, keyword)
        keyword_description = testcase_Utils.get_description_from_xmlfile(step)
        config_Utils.set_resultfile(kw_resultfile)
        testcase_Utils.pKeyword(keyword, step.get('Driver'))
        testcase_Utils.reportStatus('Skip')
        print_debug(
            "\n-----------------------------------------------------\n")
        self.data_repository['wt_junit_object'].update_count("skipped", "1", "tc",\
            self.data_repository['wt_tc_timestamp'])
        self.data_repository['wt_junit_object'].update_count("keywords", "1", "tc",\
            self.data_repository['wt_tc_timestamp'])
        # NOTE(review): several positional slots of add_keyword_result are
        # filled with the placeholder "skipped" -- confirm they line up
        # with its signature (another caller passes start time/duration).
        self.data_repository['wt_junit_object'].add_keyword_result(\
            self.data_repository['wt_tc_timestamp'], step_num, keyword, "SKIPPED",\
            "skipped", "skipped", "skipped", "skipped", "skipped", keyword_description)

        self.data_repository['step_{}_result'.format(step_num)] = "SKIPPED"
        result = ("Skip", kw_resultfile, None, None)
        return result
    def check_get_datafile(self):
        """Check InputDatFile tag in the xml file and
        based on the values return the datafile to be used for the testcase/testsuite
            - If user provided a datafile, will use it.
            - If user specified 'Default' will use the default datafile
            - If user did not provide any value will use default datafile
            - If user specified 'NODATA' will print a msg saying so.

        :Returns:
            1. datafile = absolute path of the datafile to use, the string
               "NO_DATA", or False (no datafile applicable)
        """

        datafile = xml_Utils.getChildTextbyParentTag(self.filepath, 'Details',
                                                     'InputDataFile')
        # tag missing or empty: choose a default per file type
        if datafile is None or datafile is False or \
                str(datafile).strip() == "":
            # NOTE: the "ts" branch below is a separate 'if', so its 'elif
            # proj' chains to it, not to the "tc" check above
            if self.filetype == "tc":
                # print "get default datatype for testcase"
                datafile = get_default_xml_datafile(self.filepath)
            if self.filetype == "ts":
                # Check if test suite datatype starts with iterative.
                # If yes then get default datafile else set it as false
                # this is because at testsuite level input datafile is
                # supported only if the suite datatype is iterative seq/parallel
                datatype = self.check_get_datatype(False)
                if str(datatype).lower().startswith("iterative"):
                    datafile = get_default_xml_datafile(self.filepath)
                else:
                    datafile = False
            elif self.filetype == "proj":
                datafile = False
        elif str(datafile).strip().upper() == "DEFAULT":
            print_debug(
                "This testcase will be executed using the default InputDataFile"
            )
            datafile = get_default_xml_datafile(self.filepath)
        elif str(datafile).strip().upper() == 'NO_DATA':
            print_debug('This test case will be run without any InputDataFile')
            datafile = "NO_DATA"

        elif datafile is not None and datafile is not False:
            # user-provided path: resolve it relative to this file's directory
            datafile_rel = str(datafile).strip()
            datafile = file_Utils.getAbsPath(datafile_rel,
                                             os.path.dirname(self.filepath))

        # warn when a concrete datafile path was chosen but does not exist
        if str(datafile).strip().upper(
        ) != 'NO_DATA' and datafile is not False:
            if not file_Utils.fileExists(datafile):
                print_debug('\n')
                print_error("!!! *** InputDataFile does not exist in provided path:" \
                            "{0} *** !!!".format(datafile))
        return datafile
    def manual_defects(self, paths):
        """Parse the given file/directory list and create a jira issue for
        each defect json file found.

        :Arguments:
            1. paths (list) = defect json files, or directories containing
               them, depending on self.path_type ("dir" = directories)

        :Side effects:
            Calls exit(0) when no defect json files are found
            (NOTE(review): kept for backward compatibility -- a plain
            return would be friendlier to callers).
        """
        print_debug("manual-create defects")

        defects_json_list = []
        if self.path_type == "dir":
            abs_cur_dir = os.path.abspath(os.curdir)
            for i, path in enumerate(paths, 1):
                print_info("Directory {0}: {1}".format(i, path))
                defect_dir = file_Utils.getAbsPath(path, abs_cur_dir)
                if file_Utils.dirExists(defect_dir):
                    # Fix: list and join files under the resolved absolute
                    # directory; the original listed the raw 'path', which
                    # breaks when 'path' is relative to a different cwd.
                    # (Also dropped an always-true 'is not None' check on
                    # the joined path.)
                    for j_file in os.listdir(defect_dir):
                        j_file = os.path.join(defect_dir, j_file)
                        check_file = self.check_defect_file(j_file)
                        if check_file is not None:
                            defects_json_list.append(check_file)
                else:
                    print_error("Directory does not exist in provided path {0} "\
                                "relative to cwd".format(path))
                print_debug("\n")
        else:
            for i, path in enumerate(paths, 1):
                print_info("File {0}: {1}".format(i, path))
                check_file = self.check_defect_file(path)
                if check_file is not None:
                    defects_json_list.append(check_file)
                print_debug("\n")

        if not defects_json_list:
            print_info("No defect json files found")
            exit(0)
        for j_file in defects_json_list:
            data_repository = self.defects_json_parser(j_file)
            if data_repository is not None:
                data_repository['jiraproj'] = self.jiraproj
                defect_obj = defects_driver.DefectsDriver(data_repository)
                if defect_obj.connect_warrior_jira() is True:
                    defect_obj.create_jira_issues([j_file])
def execute_testcase(testcase_filepath, data_repository, tc_context,
                     runtype, tc_parallel, queue, auto_defects, suite, jiraproj,
                     tc_onError_action, iter_ts_sys, steps_tag="Steps"):
    """ Executes the testcase (provided as a xml file)
            - Takes a testcase xml file as input and executes each command in the testcase.
            - Computes the testcase status based on the stepstatus and the impact value of the step
            - Handles step failures as per the default/specific onError action/value
            - Calls the function to report the testcase status

    :Arguments:
        1. testcase_filepath (string) = the full path of the testcase xml file
        2. data_repository (dict) = shared execution data repository; keys
           prefixed with 'wt_' carry per-testcase details (name, dirs,
           datafile, junit object, ...) and are mutated by this function
        3. tc_context (string) = 'POSITIVE'/'NEGATIVE'; a NEGATIVE context
           flips a boolean testcase status at the end of step execution
        4. runtype (string) = runtype for the main Steps block
           (SEQUENTIAL_KEYWORDS/PARALLEL_KEYWORDS)
        5. tc_parallel (bool) = True when the case runs as a parallel process
           spawned by a suite; results are then put on 'queue'
        6. queue = multiprocessing queue used only when tc_parallel is True
        7. auto_defects (bool) = whether to automatically create defects on
           testcase failure
        8. suite = name of the parent testsuite (when executed from a suite)
        9. jiraproj = jira project used for defect creation
        10. tc_onError_action (string) = testcase-level onError action;
            'ABORT_AS_ERROR' converts a False status into 'ERROR'
        11. iter_ts_sys = specific system to use for iterative execution
            (None => iterate over all systems in the input datafile)
        12. steps_tag (string) = which block of the testcase xml to execute
            (Setup/Steps/Cleanup); defaults to "Steps"

    :Returns:
        1. tc_status (bool/str) = True/False/'ERROR'/'WARN' (drivers may also
           produce 'EXCEPTION')
        2. data_repository (dict) = the (mutated) data repository
    """

    tc_status = True
    tc_start_time = Utils.datetime_utils.get_current_timestamp()
    tc_timestamp = str(tc_start_time)
    print_info("[{0}] Testcase execution starts".format(tc_start_time))
    get_testcase_details(testcase_filepath, data_repository, jiraproj)
    #get testwrapperfile details like testwrapperfile, data_type and runtype
    testwrapperfile, j_data_type, j_runtype, setup_on_error_action = \
        get_testwrapper_file_details(testcase_filepath, data_repository)
    data_repository['wt_testwrapperfile'] = testwrapperfile
    isRobotWrapperCase = check_robot_wrapper_case(testcase_filepath)

    # These lines are for creating testcase junit file
    from_ts = False
    pj_junit_display = 'False'
    if not 'wt_junit_object' in data_repository:
        # not from testsuite: build a standalone junit object for this case
        tc_junit_object = junit_class.Junit(filename=data_repository['wt_name'],
                                            timestamp=tc_timestamp,
                                            name="customProject_independant_testcase_execution",
                                            display=pj_junit_display)
        if "jobid" in data_repository:
            tc_junit_object.add_jobid(data_repository["jobid"])
            del data_repository["jobid"]
        tc_junit_object.create_testcase(location=data_repository['wt_filedir'],
                                        timestamp=tc_timestamp,
                                        ts_timestamp=tc_timestamp,
                                        name=data_repository['wt_name'],
                                        testcasefile_path=data_repository['wt_testcase_filepath'])
        junit_requirements(testcase_filepath, tc_junit_object, tc_timestamp)
        data_repository['wt_ts_timestamp'] = tc_timestamp
    else:
        tag = "testcase" if steps_tag == "Steps" else steps_tag
        tc_junit_object = data_repository['wt_junit_object']
        #creates testcase based on tag given Setup/Steps/Cleanup
        tc_junit_object.create_testcase(location="from testsuite", timestamp=tc_timestamp,
                                        ts_timestamp=data_repository['wt_ts_timestamp'],
                                        classname=data_repository['wt_suite_name'],
                                        name=data_repository['wt_name'],
                                        tag=tag,
                                        testcasefile_path=data_repository['wt_testcase_filepath'])
        from_ts = True
        junit_requirements(testcase_filepath, tc_junit_object, data_repository['wt_ts_timestamp'])
    data_repository['wt_tc_timestamp'] = tc_timestamp
    data_repository['tc_parallel'] = tc_parallel
    data_type = data_repository['wt_data_type']
    if not from_ts:
        data_repository["war_parallel"] = False

    # Adding resultsdir, logsdir, title as attributes to testcase_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of properties tag in testcase
    tc_junit_object.add_property("resultsdir", os.path.dirname(data_repository['wt_resultsdir']),
                                 "tc", tc_timestamp)
    tc_junit_object.update_attr("console_logfile", data_repository['wt_console_logfile'],
                                "tc", tc_timestamp)
    tc_junit_object.update_attr("title", data_repository['wt_title'], "tc", tc_timestamp)
    tc_junit_object.update_attr("data_file", data_repository['wt_datafile'], "tc", tc_timestamp)
    if data_repository['wt_mapfile']:
        tc_junit_object.update_attr("mapfile", data_repository['wt_mapfile'], "tc", tc_timestamp)

    data_repository['wt_junit_object'] = tc_junit_object
    print_testcase_details_to_console(testcase_filepath, data_repository, steps_tag)
    # Prints the path of result summary file at the beginning of execution
    if data_repository['war_file_type'] == "Case":
        filename = os.path.basename(testcase_filepath)
        html_filepath = os.path.join(data_repository['wt_resultsdir'],
                                     Utils.file_Utils.getNameOnly(filename)) + '.html'
        print_info("HTML result file: {0}".format(html_filepath))
    #get the list of steps in the given tag - Setup/Steps/Cleanup
    step_list = common_execution_utils.get_step_list(testcase_filepath,
                                                     steps_tag, "step")
    if not step_list:
        print_warning("Warning! cannot get steps for execution")
        tc_status = "ERROR"

    # NOTE(review): this branch looks unreachable — a truthy step_list has
    # len > 0, so the second condition can never hold; confirm before removal
    if step_list and not len(step_list):
        print_warning("step list is empty in {0} block".format(steps_tag))

    tc_state = Utils.xml_Utils.getChildTextbyParentTag(testcase_filepath,
                                                       'Details', 'State')
    # A 'Draft' case, or a robot_wrapper case run outside a suite, is not
    # executed and is marked as 'ERROR' instead
    if tc_state is not False and tc_state is not None and \
       tc_state.upper() == "DRAFT":
        print_warning("Testcase is in 'Draft' state, it may have keywords "
                      "that have not been developed yet. Skipping the "
                      "testcase execution and it will be marked as 'ERROR'")
        tc_status = "ERROR"
    elif isRobotWrapperCase is True and from_ts is False:
        print_warning("Case which has robot_wrapper steps should be executed "
                      "as part of a Suite. Skipping the case execution and "
                      "it will be marked as 'ERROR'")
        tc_status = "ERROR"
    elif step_list:

        setup_tc_status, cleanup_tc_status = True, True
        #1.execute setup steps if testwrapperfile is present in testcase
        #and not from testsuite execution
        #2.execute setup steps if testwrapperfile is present in testcase
        #and from testsuite execution and testwrapperfile is not defined in test suite.
        if (testwrapperfile and not from_ts) or (testwrapperfile and \
            from_ts and not 'suite_testwrapper_file' in data_repository):
            setup_step_list = common_execution_utils.get_step_list(testwrapperfile,
                                                                   "Setup", "step")
            if not len(setup_step_list):
                print_warning("step list is empty in {0} block".format("Setup"))

            print_info("****** SETUP STEPS EXECUTION STARTS *******")
            data_repository['wt_step_type'] = 'setup'
            #to consider relative paths provided from wrapperfile instead of testcase file
            original_tc_filepath = data_repository['wt_testcase_filepath']
            data_repository['wt_testcase_filepath'] = testwrapperfile
            setup_tc_status = execute_steps(j_data_type, j_runtype, \
                data_repository, setup_step_list, tc_junit_object, iter_ts_sys)
            #reset to original testcase filepath
            data_repository['wt_testcase_filepath'] = original_tc_filepath
            data_repository['wt_step_type'] = 'step'
            print_info("setup_tc_status : {0}".format(setup_tc_status))
            print_info("****** SETUP STEPS EXECUTION ENDS *******")

        # main Steps run only when setup passed (or setup_on_error_action
        # is 'next', which continues regardless of setup status)
        if setup_on_error_action == 'next' or \
            (setup_on_error_action == 'abort' \
            and isinstance(setup_tc_status, bool) and setup_tc_status):
            if steps_tag == "Steps":
                print_info("****** TEST STEPS EXECUTION STARTS *******")
            data_repository['wt_step_type'] = 'step'
            tc_status = execute_steps(data_type, runtype, \
                data_repository, step_list, tc_junit_object, iter_ts_sys)
            if steps_tag == "Steps":
                print_info("****** TEST STEPS EXECUTION ENDS *******")

        else:
            print_error("Test steps are not executed as setup steps failed to execute,"\
                        "setup status : {0}".format(setup_tc_status))
            print_error("Steps in cleanup will be executed on besteffort")
            tc_status = "ERROR"

        if tc_context.upper() == 'NEGATIVE':
            # negative context: invert a boolean status (ERROR/EXCEPTION stay)
            if all([tc_status != 'EXCEPTION', tc_status != 'ERROR']):
                print_debug("Test case status is: '{0}', flip status as context is "
                            "negative".format(tc_status))
                tc_status = not tc_status

        #Execute Debug section from testcase tw file upon tc failure
        if not isinstance(tc_status, bool) or (isinstance(tc_status, bool) and tc_status is False):
            tc_testwrapperfile = None
            if Utils.xml_Utils.nodeExists(testcase_filepath, "TestWrapperFile"):
                tc_testwrapperfile = Utils.xml_Utils.getChildTextbyParentTag(testcase_filepath, \
                    'Details', 'TestWrapperFile')
                abs_cur_dir = os.path.dirname(testcase_filepath)
                tc_testwrapperfile = Utils.file_Utils.getAbsPath(tc_testwrapperfile, abs_cur_dir)

            tc_debug_step_list = None
            if tc_testwrapperfile and Utils.xml_Utils.nodeExists(tc_testwrapperfile, "Debug"):
                tc_debug_step_list = common_execution_utils.get_step_list(tc_testwrapperfile,
                                                                          "Debug", "step")
            if tc_debug_step_list:
                print_info("****** DEBUG STEPS EXECUTION STARTS *******")
                data_repository['wt_step_type'] = 'debug'
                original_tc_filepath = data_repository['wt_testcase_filepath']
                #to consider relative paths provided from wrapperfile instead of testcase file
                data_repository['wt_testcase_filepath'] = tc_testwrapperfile
                debug_tc_status = execute_steps(j_data_type, j_runtype, \
                    data_repository, tc_debug_step_list, tc_junit_object, iter_ts_sys)
                #reset to original testcase filepath
                data_repository['wt_testcase_filepath'] = original_tc_filepath
                data_repository['wt_step_type'] = 'step'
                print_info("debug_tc_status : {0}".format(debug_tc_status))
                print_info("****** DEBUG STEPS EXECUTION ENDS *******")

        #1.execute cleanup steps if testwrapperfile is present in testcase
        #and not from testsuite execution
        #2.execute cleanup steps if testwrapperfile is present in testcase
        #and from testsuite execution and testwrapperfile is not defined in test suite.
        if (testwrapperfile and not from_ts) or (testwrapperfile and \
            from_ts and not 'suite_testwrapper_file' in data_repository):
            cleanup_step_list = common_execution_utils.get_step_list(testwrapperfile,
                                                                     "Cleanup", "step")
            if not len(cleanup_step_list):
                print_warning("step list is empty in {0} block".format("Cleanup"))
            print_info("****** CLEANUP STEPS EXECUTION STARTS *******")
            data_repository['wt_step_type'] = 'cleanup'
            original_tc_filepath = data_repository['wt_testcase_filepath']
            #to consider relative paths provided from wrapperfile instead of testcase file
            data_repository['wt_testcase_filepath'] = testwrapperfile
            cleanup_tc_status = execute_steps(j_data_type, j_runtype, \
                data_repository, cleanup_step_list, tc_junit_object, iter_ts_sys)
            data_repository['wt_step_type'] = 'step'
            print_info("cleanup_tc_status : {0}".format(cleanup_tc_status))
            print_info("****** CLEANUP STEPS EXECUTION ENDS *******")

    # NOTE(review): cleanup_tc_status is only bound inside the 'elif step_list'
    # branch above; the short-circuit on isinstance(tc_status, bool) is what
    # keeps the next lines from raising NameError on the skip paths — confirm
    if step_list and isinstance(tc_status, bool) and isinstance(cleanup_tc_status, bool) \
        and tc_status and cleanup_tc_status:
        tc_status = True
    #set tc status to WARN if only cleanup fails
    elif step_list and isinstance(tc_status, bool) and tc_status and cleanup_tc_status != True:
        print_warning("setting tc status to WARN as cleanup failed")
        tc_status = "WARN"

    if step_list and tc_status == False and tc_onError_action \
            and tc_onError_action.upper() == 'ABORT_AS_ERROR':
        print_info("Testcase status will be marked as ERROR as onError "
                   "action is set to 'abort_as_error'")
        tc_status = "ERROR"

    defectsdir = data_repository['wt_defectsdir']
    check_and_create_defects(tc_status, auto_defects, data_repository, tc_junit_object)

    print_debug("\n")
    tc_end_time = Utils.datetime_utils.get_current_timestamp()
    print_debug("[{0}] Testcase execution completed".format(tc_end_time))
    tc_duration = Utils.datetime_utils.get_time_delta(tc_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(tc_duration)
    Utils.data_Utils.update_datarepository({"tc_duration" : hms})
    tc_junit_object.update_count(tc_status, "1", "ts", data_repository['wt_ts_timestamp'])
    tc_junit_object.update_count("tests", "1", "ts", data_repository['wt_ts_timestamp'])
    tc_junit_object.update_count("tests", "1", "pj", "not appicable")
    tc_junit_object.update_attr("status", str(tc_status), "tc", tc_timestamp)
    tc_junit_object.update_attr("time", str(tc_duration), "tc", tc_timestamp)
    tc_junit_object.add_testcase_message(tc_timestamp, tc_status)
    if str(tc_status).upper() in ["FALSE", "ERROR", "EXCEPTION"]:
        tc_junit_object.update_attr("defects", defectsdir, "tc", tc_timestamp)

    # Adding resultsdir, logsdir, title as attributes to testcase_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of properties tag in testcase
    tc_junit_object.update_attr("resultsdir", os.path.dirname(data_repository['wt_resultsdir']),
                                "tc", tc_timestamp)
    tc_junit_object.update_attr("logsdir", os.path.dirname(data_repository['wt_logsdir']),
                                "tc", tc_timestamp)
    # close any kafka producer/consumer left in the repository by the steps
    if data_repository.get("kafka_producer", None):
        war_producer = data_repository.get("kafka_producer")
        war_producer.kafka_producer.flush(60)
        war_producer.kafka_producer.close()
        print_info("Producer Closed connection with kafka broker")
    elif data_repository.get("kafka_consumer", None):
        war_consumer = data_repository.get("kafka_consumer")
        war_consumer.kafka_consumer.close()
        print_info("Consumer closed connection with kafka broker")

    # Best-effort scan of the input datafile for a <system> element that has
    # an attribute value "kafka_producer"; its "name" attribute selects the
    # system used to publish results below.  The bare except deliberately
    # swallows parse errors (missing/invalid datafile => no publishing).
    # NOTE(review): the inner 'break' only exits the attribute loop, so later
    # matching systems overwrite system_name — confirm last-match is intended
    data_file = data_repository["wt_datafile"]
    system_name = ""
    try:
        tree = et.parse(data_file)
        for elem in tree.iter():
            if elem.tag == "system":
                for key, value in elem.items():
                    if value == "kafka_producer":
                        system_name = elem.get("name")
                        break
    except:
        pass

    if system_name:
        junit_file_obj = data_repository['wt_junit_object']
        root = junit_file_obj.root
        suite_details = root.findall("testsuite")[0]
        test_case_details = suite_details.findall("testcase")
        if test_case_details:
            test_case_details = suite_details.findall("testcase")[0]
            print_info("kafka server is presented in Inputdata file..")
            system_details = _get_system_or_subsystem(data_file, system_name)
            data = {}
            # NOTE(review): getchildren() was removed from ElementTree in
            # Python 3.9 — list(system_details) is the modern equivalent
            for item in system_details.getchildren():
                if item.tag == "kafka_port":
                    ssh_port = item.text
                    ssh_port = Utils.data_Utils.sub_from_env_var(ssh_port)
                    continue
                if item.tag == "ip":
                    ip_address = item.text
                    ip_address = Utils.data_Utils.sub_from_env_var(ip_address)
                    continue
                try:
                    value = ast.literal_eval(item.text)
                except ValueError:
                    value = item.text
                data.update({item.tag: value})

            # NOTE(review): ip_address/ssh_port are unbound (NameError) if the
            # system element lacks <ip>/<kafka_port> children — confirm schema
            ip_port = ["{}:{}".format(ip_address, ssh_port)]
            data.update({"bootstrap_servers": ip_port})
            data.update({"value_serializer": lambda x: json.dumps(x).encode('utf-8')})
            try:
                producer = WarriorKafkaProducer(**data)
                producer.send_messages('warrior_results', suite_details.items())
                producer.send_messages('warrior_results', test_case_details.items())
                print_info("message published to topic: warrior_results {}".format(
                    suite_details.items()))
                print_info("message published to topic: warrior_results {}".format(
                    test_case_details.items()))
            except:
                # best-effort publishing: broker unavailability must not fail the case
                print_warning("Unable to connect kafka server !!")

    report_testcase_result(tc_status, data_repository, tag=steps_tag)
    if not from_ts:
        # standalone case: finalize project-level counters and emit the junit file
        tc_junit_object.update_count(tc_status, "1", "pj", "not appicable")
        tc_junit_object.update_count("suites", "1", "pj", "not appicable")
        tc_junit_object.update_attr("status", str(tc_status), "ts",
                                    data_repository['wt_ts_timestamp'])
        tc_junit_object.update_attr("status", str(tc_status), "pj", "not appicable")
        tc_junit_object.update_attr("time", str(tc_duration), "ts",
                                    data_repository['wt_ts_timestamp'])
        tc_junit_object.update_attr("time", str(tc_duration), "pj", "not appicable")

        tc_junit_object.output_junit(data_repository['wt_resultsdir'])

        # Save JUnit/HTML results of the Case in MongoDB server
        if data_repository.get("db_obj") is not False:
            tc_junit_xml = data_repository['wt_resultsdir'] + os.sep +\
                tc_junit_object.filename + "_junit.xml"
            data_repository.get("db_obj").add_html_result_to_mongodb(tc_junit_xml)
    else:
        # send an email on TC failure(no need to send an email here when
        # executing a single case).
        if str(tc_status).upper() in ["FALSE", "ERROR", "EXCEPTION"]:
            email_setting = None
            # for first TC failure
            if "any_failures" not in data_repository:
                email_params = email.get_email_params("first_failure")
                if all(value != "" for value in email_params[:3]):
                    email_setting = "first_failure"
                data_repository['any_failures'] = True
            # for further TC failures
            if email_setting is None:
                email_params = email.get_email_params("every_failure")
                if all(value != "" for value in email_params[:3]):
                    email_setting = "every_failure"

            if email_setting is not None:
                email.compose_send_email("Test Case: ", data_repository[\
                 'wt_testcase_filepath'], data_repository['wt_logsdir'],\
                                         data_repository['wt_resultsdir'], tc_status,
                                         email_setting)

        if not tc_parallel and not data_repository["war_parallel"]:
            if 'wp_results_execdir' in data_repository:
                # Create and replace existing Project junit file for each case
                tc_junit_object.output_junit(data_repository['wp_results_execdir'],
                                             print_summary=False)
            else:
                # Create and replace existing Suite junit file for each case
                tc_junit_object.output_junit(data_repository['wt_results_execdir'],
                                             print_summary=False)

    if tc_parallel:
        tc_impact = data_repository['wt_tc_impact']
        if tc_impact.upper() == 'IMPACT':
            msg = "Status of the executed test case impacts Testsuite result"
        elif tc_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test case does not impact Teststuie result"
        print_debug(msg)
        tc_name = Utils.file_Utils.getFileName(testcase_filepath)
        # put result into multiprocessing queue and later retrieve in corresponding driver
        queue.put((tc_status, tc_name, tc_impact, tc_duration, tc_junit_object))

    # Save XML results of the Case in MongoDB server
    if data_repository.get("db_obj") is not False:
        data_repository.get("db_obj").add_xml_result_to_mongodb(data_repository['wt_resultfile'])

    # main need tc_status and data_repository values to unpack
    return tc_status, data_repository
def execute_steps(data_type, runtype, data_repository, step_list, tc_junit_object, iter_ts_sys):
    """Executes steps based on given data_type and runtype.

    Dispatches the step list to the keyword driver matching the testcase
    data_type (CUSTOM/ITERATIVE/HYBRID) and runtype
    (SEQUENTIAL_KEYWORDS/PARALLEL_KEYWORDS).

    :Arguments:
        1. data_type (str) = testcase data type (CUSTOM/ITERATIVE/HYBRID)
        2. runtype (str) = SEQUENTIAL_KEYWORDS or PARALLEL_KEYWORDS
        3. data_repository (dict) = shared execution data repository
        4. step_list (list) = list of steps to execute
        5. tc_junit_object = junit result object of the testcase
        6. iter_ts_sys = specific system to iterate on for iterative
           execution (None => all iteration-enabled systems in the datafile)

    :Returns:
        tc_status (bool/str) = status returned by the selected driver;
        False for an unsupported data_type/runtype combination
    """
    tc_status = True
    dtype = data_type.upper()
    rtype = runtype.upper()

    if dtype == 'CUSTOM' and rtype == 'SEQUENTIAL_KEYWORDS':
        tc_status = execute_custom(data_type, runtype,
                                   custom_sequential_kw_driver,
                                   data_repository, step_list)
    elif dtype == 'CUSTOM' and rtype == 'PARALLEL_KEYWORDS':
        # parallel keyword execution drops the live html object and flags
        # the repository so downstream code knows it runs in parallel mode
        tc_junit_object.remove_html_obj()
        data_repository["war_parallel"] = True
        tc_status = execute_custom(data_type, runtype,
                                   custom_parallel_kw_driver,
                                   data_repository, step_list)
    elif dtype == 'ITERATIVE' and rtype in ('SEQUENTIAL_KEYWORDS',
                                            'PARALLEL_KEYWORDS'):
        # both iterative modes share the system-list computation; only the
        # driver (and the parallel-mode bookkeeping) differs
        if rtype == 'PARALLEL_KEYWORDS':
            tc_junit_object.remove_html_obj()
            data_repository["war_parallel"] = True
            print_debug("iterative parallel")
            driver = iterative_parallel_kw_driver
        else:
            print_debug("iterative sequential")
            driver = iterative_sequential_kw_driver
        system_list = get_system_list(data_repository['wt_datafile'],
                                      iter_req=True) \
            if iter_ts_sys is None else [iter_ts_sys]
        if not system_list:
            # fix: message previously read "when DataType id iterative"
            print_warning("DataType is iterative but no systems found in "
                          "input datafile, when DataType is iterative the "
                          "InputDataFile should have system(s) to "
                          "iterate upon")
            tc_status = False
        else:
            tc_status = driver.main(step_list, data_repository, tc_status,
                                    system_list)
    elif dtype == "HYBRID":
        print_debug("Hybrid")
        # hybrid mode needs both the systems and their node sub-elements
        system_list, system_node_list = get_system_list(
            data_repository['wt_datafile'], node_req=True)
        hyb_drv_obj = hybrid_driver_class.HybridDriver(
            step_list, data_repository, tc_status, system_list,
            system_node_list)
        tc_status = hyb_drv_obj.execute_hybrid_mode()
    else:
        print_warning("unsupported value provided for testcase data_type "
                      "or testsuite runtype")
        tc_status = False

    return tc_status
# Exemplo n.º 30
# 0
def execute_parallel_testcases(testcase_list,
                               suite_repository,
                               data_repository,
                               from_project,
                               tc_parallel=True,
                               auto_defects=False,
                               iter_ts_sys=None):
    """Takes a list of testcase as input and executes them in parallel by
    creating separate process of testcase_driver for each of these testcase

    :Arguments:
        1. testcase_list (list) = testcase xml elements to execute
        2. suite_repository (dict) = repository with suite details
           (suite_name, testsuite_filepath, def_on_error_action, ...)
        3. data_repository (dict) = shared execution data repository
        4. from_project (bool) = True when the suite is executed from a project
        5. tc_parallel (bool) = passed through to each testcase driver
        6. auto_defects (bool) = whether to auto-create defects on failure
        7. iter_ts_sys = specific system for iterative testcases (None => all)

    :Returns:
        testsuite_status = suite status computed from the individual
        testcase statuses and their impact values
    """

    jobs_list = []
    output_q = None
    # fix: initialize so an empty testcase_list no longer raises NameError
    # at the print_debug below
    process = None
    suite = suite_repository['suite_name']
    testsuite_filepath = suite_repository['testsuite_filepath']
    suite_error_action = suite_repository['def_on_error_action']
    jiraproj = data_repository["jiraproj"]
    testsuite_dir = os.path.dirname(testsuite_filepath)

    for testcase in testcase_list:
        target_module = testcase_driver.main
        tc_rel_path = testsuite_utils.get_path_from_xmlfile(testcase)
        if tc_rel_path is not None:
            tc_path = Utils.file_Utils.getAbsPath(tc_rel_path, testsuite_dir)
        else:
            tc_path = str(tc_rel_path)
        tc_runtype = testsuite_utils.get_runtype_from_xmlfile(testcase)
        tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(testcase)
        tc_context = Utils.testcase_Utils.get_context_from_xmlfile(testcase)
        suite_step_data_file = testsuite_utils.get_data_file_at_suite_step(
            testcase, suite_repository)
        tc_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
            testcase, 'onError', 'action')
        # fall back to the suite-level onError action when the case has none
        tc_onError_action = tc_onError_action if tc_onError_action else suite_error_action
        if suite_step_data_file is not None:
            data_file = Utils.file_Utils.getAbsPath(suite_step_data_file,
                                                    testsuite_dir)
            data_repository[tc_path] = data_file

        data_repository['wt_tc_impact'] = tc_impact

        # an OrderedDict (instead of a plain args list) is used for the tc
        # args because the queue is None initially, is created on the first
        # iteration, and must be re-injected at the same argument position
        # before calling the testcase driver main function.
        tc_args_dict = OrderedDict([("tc_path", tc_path),
                                    ("data_repository", data_repository),
                                    ("tc_context", tc_context),
                                    ("tc_runtype", tc_runtype),
                                    ("tc_parallel", tc_parallel),
                                    ("auto_defects", auto_defects),
                                    ("suite", suite),
                                    ("tc_onError_action", tc_onError_action),
                                    ("iter_ts_sys", iter_ts_sys),
                                    ("output_q", output_q),
                                    ("jiraproj", jiraproj)])

        process, jobs_list, output_q = create_and_start_process_with_queue(
            target_module, tc_args_dict, jobs_list, output_q)

    print_debug("process: {0}".format(process))
    for job in jobs_list:
        job.join()

    # an empty testcase_list means no queue was ever created
    result_list = get_results_from_queue(output_q) if output_q is not None else []

    tc_status_list = []
    tc_name_list = []
    tc_impact_list = []
    tc_duration_list = []
    # Get the junit object of each testcase, extract the information from it and
    # combine with testsuite junit object
    tc_junit_list = []

    for result in result_list:
        tc_status_list.append(result[0])
        tc_name_list.append(result[1])
        tc_impact_list.append(result[2])
        tc_duration_list.append(result[3])
        if len(result) > 4:
            tc_junit_list.append(result[4])

    # parallel testcases generate multiple testcase junit result files
    # each files log the result for one testcase and not intergrated
    # update testsuite junit result file with individual testcase result files
    data_repository['wt_junit_object'] = update_ts_junit_resultfile(
        data_repository['wt_junit_object'], tc_junit_list,
        data_repository['wt_ts_timestamp'])
    testsuite_status = Utils.testcase_Utils.compute_status_using_impact(
        tc_status_list, tc_impact_list)
    return testsuite_status