Example 1
def execute_iterative_parallel_testcases(system_list,
                                         testcase_list,
                                         suite_repository,
                                         data_repository,
                                         from_project,
                                         tc_parallel=True,
                                         auto_defects=False):
    """Takes a list of systems as input and executes the testcases in parallel by
    creating separate process of testcase_driver for each of these systems """

    jobs_list = []
    output_q = None

    for system in system_list:
        target_module = sequential_testcase_driver.main

        tc_args_dict = OrderedDict([("testcase_list", testcase_list),
                                    ("suite_repository", suite_repository),
                                    ("data_repository", data_repository),
                                    ("from_project", from_project),
                                    ("auto_defects", auto_defects),
                                    ("system", system),
                                    ("tc_parallel", tc_parallel),
                                    ("output_q", output_q)])

        process, jobs_list, output_q = create_and_start_process_with_queue(
            target_module, tc_args_dict, jobs_list, output_q)

    print_debug("process: {0}".format(process))
    for job in jobs_list:
        job.join()

    result_list = get_results_from_queue(output_q)

    tc_status_list = []
    tc_name_list = []
    tc_impact_list = []
    tc_duration_list = []
    # Get the junit object of each testcase, extract the information from it and combine with testsuite junit object
    tc_junit_list = []

    # Suite results
    for result in result_list:
        # Case results
        for val in range(len(result[0])):
            tc_status_list.append(result[0][val])
            tc_name_list.append(result[1])
            tc_impact_list.append(result[2][val])
            tc_duration_list.append(result[3][val])
            tc_junit_list.append(result[4][val])
    # parallel testcases generate multiple testcase junit result files;
    # each file logs the result for one testcase and is not integrated.
    # Update the testsuite junit result file with the individual testcase result files
    update_ts_junit_resultfile(suite_repository['wt_junit_object'],
                               tc_junit_list)
    testsuite_status = Utils.testcase_Utils.compute_status_using_impact(
        tc_status_list, tc_impact_list)
    return testsuite_status
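
A minimal sketch of the two helpers used above, assuming they wrap the standard multiprocessing module (names and signatures are inferred from the call sites here, not taken from Warrior's actual source): the first starts the target in its own process sharing a single result Queue, the second drains that queue once the jobs are joined.

from multiprocessing import Process, Queue

def create_and_start_process_with_queue(target_module, args_dict, jobs_list, output_q):
    """Start target_module in its own process, sharing output_q for results."""
    if output_q is None:
        output_q = Queue()
    args_dict["output_q"] = output_q
    process = Process(target=target_module, kwargs=dict(args_dict))
    process.start()
    jobs_list.append(process)
    return process, jobs_list, output_q

def get_results_from_queue(output_q):
    """Drain the shared queue after all jobs have been joined."""
    results = []
    while not output_q.empty():
        results.append(output_q.get())
    return results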
Example 2
    def __check_input_datafile(cls, filepath, testname, check_files_dict):
        """ Verify that the input data file exists in the path provided.
            If path not provided verify the default data file

        Arguments:
              1. filepath: filepath will be parsed as input for checking
                 Input data
              2. testname: mentions whether it is a Testcase/Testsuite datafile
              3. check_files_dict: a dict element to track the status of files,
                 i.e. whether they have been verified already or not

        Return:
              1. result(list): list of booleans, True if the datafile exists,
                 False otherwise
              2. check_files_dict: a dict element to track the status of files,
                 i.e. whether they have been verified already or not
        """

        result = []

        input_data_file = xml_Utils.getChildTextbyParentTag(filepath, 'Details',
                                                            'InputDataFile')
        if input_data_file is not False and input_data_file is not None:
            if testname == 'Testsuite':
                check_files_dict['check_datafile'] = True
            input_data_file = str(input_data_file).strip()
            if str(input_data_file).upper() == 'NO_DATA':
                print_info('No_Data option selected for this testcase')
                result.append(True)

            elif 'NO_DATA' not in str(input_data_file).upper():

                data_file_path = file_Utils.getAbsPath(input_data_file,
                                                       os.path.dirname(filepath))
                print_info("{0} input data_file_path: {1}".format(testname, data_file_path))
                if os.path.exists(data_file_path):
                    print_info("{0} Input datafile is present "\
                                "in location {1}".format(testname, data_file_path))
                    result.append(True)
                else:
                    print_error("{0} Input datafile is NOT "\
                                 "present in location {1}".format(testname, data_file_path))
                    result.append(False)

        elif input_data_file is None or input_data_file is False:
            if testname == 'Testcase':
                print_info("InputDataFile is not provided,"\
                           "checking if default InputDataFile exists....")
                default_datafilepath = execution_files_class.get_default_xml_datafile(\
                    filepath)
                print_debug("default_datafile_path: {0}".format(default_datafilepath))
                if os.path.exists(default_datafilepath):
                    print_info("Default input datafile for the Testcase is available")
                    result.append(True)
                else:
                    print_error("Default input datafile for the Testcase is NOT available")
                    result.append(False)
            else:
                check_files_dict['check_datafile'] = False

        return result, check_files_dict
    def testcase_prerun(self, tc_filepath, check_files_dict=None):
        """Executes prerun of a testcase file """
        print('\n')
        print_info('=' * 40)
        print_debug("Validating Test case xml")
        print_info('=' * 40)

        testcase_xsd_fullpath = self.xsd_dir + os.sep + 'warrior_testcase.xsd'
        #print_info("Test case_xsd_location: {0}".format(testcase_xsd_fullpath))

        tc_status = self.xml_to_xsd_validation(tc_filepath,
                                               testcase_xsd_fullpath)

        if tc_status:
            data_file_valid = self.check_tc_input_datafile(
                tc_filepath, check_files_dict)
            tc_status &= data_file_valid
            steps_field_valid = self.check_steps(tc_filepath)
            tc_status &= steps_field_valid
        else:
            print_error("Incorrect xml format")
        time.sleep(5)
        status = testcase_Utils.convertLogic(tc_status)
        print_info('TC STATUS: {0}ED'.format(status))

        return tc_status
Example 4
def execute_iterative_parallel(step_list, data_repository, tc_status,
                               system_list):
    """Takes a list of steps as input and executes them in parallel by
    creating separate process of step_driver for each of these steps """

    jobs_list = []
    output_q = None
    for system_name in system_list:
        target_module = testcase_steps_execution.main
        args_dict = OrderedDict([
            ("step_list", step_list),
            ("data_repository", data_repository),
            ("system_name", system_name),
            ("kw_parallel", True),
            ("output_q", output_q),
        ])

        process, jobs_list, output_q = create_and_start_process_with_queue(
            target_module, args_dict, jobs_list, output_q)

    print_debug("process: {0}".format(process))
    for job in jobs_list:
        job.join()

    result_list = get_results_from_queue(output_q)

    system_status_list = []
    system_resultfile_list = []
    step_impact_list = []
    tc_junit_list = []

    for result in result_list:
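        # Each result is the tuple that testcase_steps_execution.main put on
        # the output queue: (step status list, keyword result file list,
        # system name, step impact list, testcase junit object).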
        step_status_list = result[0]
        kw_resultfile_list = result[1]
        system_name = result[2]
        step_impact_list = result[3]
        tc_junit_list.append(result[4])
        system_status = testcase_Utils.compute_status_using_impact(
            step_status_list, step_impact_list)
        system_resultfile = testcase_Utils.compute_system_resultfile(
            kw_resultfile_list, data_repository['wt_resultsdir'], system_name)
        system_status_list.append(system_status)
        system_resultfile_list.append(system_resultfile)

    tc_status = Utils.testcase_Utils.compute_status_without_impact(
        system_status_list)
    # parallel keywords generate multiple keyword junit result files;
    # each file logs the result for one keyword and is not integrated.
    # Update the testcase junit result file with the individual keyword result files
    data_repository['wt_junit_object'] = update_tc_junit_resultfile(
        data_repository['wt_junit_object'], tc_junit_list,
        data_repository['wt_tc_timestamp'])
    print_debug("Updating Testcase result file...")
    Utils.testcase_Utils.append_result_files(data_repository['wt_resultfile'],
                                             system_resultfile_list)

    return tc_status
    def _compute_testcase_status(self):
        """Computes the testcase status from the per-system status list and
        appends the per-system result files to the testcase result file."""
        tc_status = testcase_Utils.compute_status_without_impact(
            self.system_status_list)
        print_debug("Updating Testcase result file...")
        testcase_Utils.append_result_files(
            self.data_repository['wt_resultfile'], self.system_resultfile_list)
        return tc_status
    def ssh_con(self,
                retries=1,
                interval=1,
                timeout=60,
                verify_keys=False,
                invoke_shell=False):
        """Connects to the host using ssh object

        :Arguments:
            1. retries = No of attempts before returning fail
            2. interval = Time to wait before the next retry
            3. timeout = Wait time for a response
            4. verify_keys = Verify the host entry is available in the host keys
            5. invoke_shell = Invoke an interactive shell after connecting

        :Returns:
            1. status(bool)= True / False

        """
        for attempt in range(retries):
            print_debug("Attempt{} connecting to {}".format(
                attempt + 1, self.target))
            try:
                if not verify_keys:
                    self.sshobj.set_missing_host_key_policy(\
                                                self.param.AutoAddPolicy())
                self.sshobj.connect(self.target,
                                    self.port,
                                    self.uid,
                                    self.pid,
                                    timeout=timeout,
                                    look_for_keys=verify_keys)
                if invoke_shell:
                    print_info("Opening shell for {}".format(self.sshobj))
                    self.sshobj.invoke_shell()

                if self.logfile is not None:
                    self.log = open(self.logfile, 'w')
            except self.param.SSHException:
                print_error(" ! could not connect to %s...check logs" %
                            self.target)
                return False
            except Exception as err:
                print_error("Login failed {0}".format(str(err)))
                sleep(interval)
                continue
            else:
                print_info("Connected to the host")
                return True
        return False
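
For reference, a standalone sketch of the same retry pattern written directly against paramiko (assumed here to be the library behind self.sshobj and self.param):

import time
import paramiko

def connect_with_retries(host, user, password, port=22,
                         retries=3, interval=5, timeout=30):
    """Return a connected SSHClient, or None if all attempts fail."""
    client = paramiko.SSHClient()
    # equivalent of verify_keys=False above: accept unknown host keys
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    for attempt in range(retries):
        try:
            client.connect(host, port=port, username=user, password=password,
                           timeout=timeout, look_for_keys=False)
            return client
        except Exception as err:
            print("Attempt {0} failed: {1}".format(attempt + 1, err))
            time.sleep(interval)
    return None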
Example 7
    def open_browser(self, browser_name='firefox', webdriver_remote_url=False,
                     desired_capabilities=None, **kwargs):
        """Open a browser session"""

        profile_dir = kwargs.get('profile_dir', None)

        if webdriver_remote_url:
            print_debug("Opening browser '{0}' through remote server at '{1}'"\
                        .format(browser_name, webdriver_remote_url))
        else:
            print_debug("Opening browser '%s'" % (browser_name))
        browser = self._make_browser(browser_name, desired_capabilities,
                                     profile_dir, webdriver_remote_url, **kwargs)
        return browser
Example 8
def send_email(smtp_host, sender, receivers, subject, body, files):
    """ sends email from smtp server using input arguments:
    :Arguments:
        1. smtp_host - smtp host name
        2. sender - sender email ID
        3. receivers - receiver email ID(s)
        4. subject - email subject line
        5. body - email body
        6. files - files to be attached
    """
    if not smtp_host:
        print_debug("No smtp host defined in w_settings, no email sent")
        return
    if not receivers:
        print_debug("No receiver defined in w_settings, no email sent")
        return

    message = MIMEMultipart()
    message['From'] = sender
    message['To'] = receivers
    receivers_list = [receiver.strip() for receiver in receivers.split(',')]
    message['Subject'] = subject

    # HTML is used for better formatting of mail body
    part = MIMEText(body, 'html')
    message.attach(part)

    for attach_file in files or []:
        with open(attach_file, "rb") as fil:
            part = MIMEBase('application', 'octet-stream')
            part.set_payload(fil.read())
            encoders.encode_base64(part)
            part.add_header('Content-Disposition',
                            "attachment; filename=%s" % basename(attach_file))
            message.attach(part)

    try:
        smtp_obj = smtplib.SMTP(smtp_host)
        smtp_obj.sendmail(sender, receivers_list, message.as_string())
        pNote('Execution results emailed to receiver(s): {}'.format(receivers))
        smtp_obj.close()

    except BaseException:
        pNote("Error occurred while sending email, check w_settings.xml"
              "configuration for correct smtp host, "
              "receiver email address etc.")
Example 9
def execute_custom_sequential(step_list, data_repository, tc_status,
                              system_name):
    """ Takes a list of steps as input and executes
    them sequentially by sending then to the
    testcase_steps_execution driver Executes all the steps in custom sequential fashion """

    step_status_list, kw_resultfile_list,\
    step_impact_list = testcase_steps_execution.main(step_list, data_repository, system_name)

    tc_status = Utils.testcase_Utils.compute_status_using_impact(
        step_status_list, step_impact_list)

    print_debug("Updating Testcase result file...")
    Utils.testcase_Utils.append_result_files(data_repository['wt_resultfile'],
                                             kw_resultfile_list)

    return tc_status
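
compute_status_using_impact is not shown in this snippet; what follows is a minimal sketch of its assumed semantics, for orientation only (steps marked NOIMPACT are ignored and the remaining statuses are ANDed; the real Utils.testcase_Utils implementation also handles non-boolean statuses such as ERROR or SKIP):

def compute_status_using_impact(status_list, impact_list):
    """Combine step statuses, skipping steps whose impact is NOIMPACT."""
    status = True
    for step_status, impact in zip(status_list, impact_list):
        if str(impact).upper() == "NOIMPACT":
            continue
        status = status and (step_status is True)
    return status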
    def testsuite_prerun(self,
                         testsuite_filepath,
                         root,
                         check_files_dict=None):
        """Executes prerun of a testsuite file """
        print('\n')
        print_info('*' * 40)
        print_debug("Validating Test suite xml")
        print_info('*' * 40)

        testsuite_xsd_fullpath = self.xsd_dir + os.sep + 'warrior_suite.xsd'
        testsuite_status = self.xml_to_xsd_validation(testsuite_filepath,
                                                      testsuite_xsd_fullpath)
        if testsuite_status:
            data_file_valid, check_files_dict = self.check_testsuite_input_datafile(\
                testsuite_filepath, check_files_dict)
            testsuite_status &= data_file_valid
            for testcase in root.iter('Testcase'):
                tc_path_rel = testsuite_utils.get_path_from_xmlfile(testcase)
                tc_path = file_Utils.getAbsPath(
                    tc_path_rel, os.path.dirname(testsuite_filepath))
                time.sleep(5)
                if os.path.isfile(tc_path):
                    print('\n')
                    print_info('tc_path: {0}, Testcase file exists...'.format(
                        tc_path))
                    tc_status = self.testcase_prerun(tc_path, check_files_dict)
                else:
                    print('\n')
                    tc_status = False
                    print_error(
                        'tc_path: {0}, Testcase file does not exist'.format(
                            tc_path))
                    print_info('TC STATUS: {0}'.format('FAILED'))
                testsuite_status &= tc_status

        else:
            print_error("Incorrect xml format")

        time.sleep(5)
        print('\n')
        status = testcase_Utils.convertLogic(testsuite_status)
        print_info('SUITE STATUS: {0}ED'.format(status))

        return testsuite_status
    def project_prerun(self, project_filepath, root):
        """Executes prerun of a project file """

        print('\n')
        print_info('+' * 40)
        print_debug("Validating Project xml")
        print_info('+' * 40)
        project_xsd_fullpath = self.xsd_dir + os.sep + 'warrior_project.xsd'
        project_status = self.xml_to_xsd_validation(project_filepath,
                                                    project_xsd_fullpath)
        if project_status:
            check_files_dict = self.check_proj_results_logsdir(
                project_filepath)
            for testsuite in root.iter('Testsuite'):
                testsuite_path_rel = testsuite_utils.get_path_from_xmlfile(
                    testsuite)
                testsuite_path = file_Utils.getAbsPath(
                    testsuite_path_rel, os.path.dirname(project_filepath))

                if os.path.isfile(testsuite_path):
                    print('\n')
                    print_info("Testsuite_path: {0}, Testsuite"\
                               "file exists...".format(testsuite_path))
                    ts_root = xml_Utils.getRoot(testsuite_path)
                    tsuite_status = self.testsuite_prerun(
                        testsuite_path, ts_root, check_files_dict)
                else:
                    print('\n')
                    tsuite_status = False
                    print_error('testsuite_path: {0}, Testsuite file '
                                'does not exist'.format(testsuite_path))
                    print_info('SUITE STATUS: {0}'.format('FAILED'))
                project_status &= tsuite_status

        else:
            print_error("Incorrect xml format")

        time.sleep(5)
        print('\n')
        status = testcase_Utils.convertLogic(project_status)
        print_info('PROJECT STATUS: {0}ED'.format(status))

        return project_status
Example 12
    def _get_element(self, browser, locator, **kwargs):
        """gets the element with matching criteria
        uses other private methods """
        findall = kwargs.get('findall', None)
        prefix, value = self._parse_locator(locator)
        if prefix is None:
            raise ValueError("Strategy to find elements is "
                             "not provided in the locator={0}".format(locator))
        locator_function = self._get_strategy_function(prefix)
        if not locator_function:
            raise ValueError("{0} in locator={1} is not a "
                             "supported strategy to find elements.".format(prefix, locator))
        try:
            element = locator_function(value, browser, findall)
        except NoSuchElementException:
            element = None
        else:
            print_debug("Element found")
        return element
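
_parse_locator and _get_strategy_function are private helpers not shown here. A minimal sketch of their assumed contract, where a locator such as "id=submit-btn" splits into a strategy prefix and a value, and the prefix selects a finder function (the strategy table below is illustrative, not Warrior's actual one):

def _parse_locator(locator):
    """Split 'prefix=value' into (prefix, value); prefix is None if absent."""
    prefix, sep, value = locator.partition("=")
    if not sep:
        return None, locator
    return prefix.strip().lower(), value

def _get_strategy_function(prefix):
    """Map a strategy prefix to a callable(value, browser, findall)."""
    strategies = {
        "id": lambda value, browser, findall: browser.find_element("id", value),
        "xpath": lambda value, browser, findall: (browser.find_elements("xpath", value)
                                                  if findall else
                                                  browser.find_element("xpath", value)),
    }
    return strategies.get(prefix)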
Example 13
    def open_browser(self, browser_name='firefox', webdriver_remote_url=False,
                     desired_capabilities=None, binary=None, gecko_path=None,
                     **kwargs):
        """Open a browser session"""

        profile_dir = kwargs.get('profile_dir', None)

        if webdriver_remote_url:
            print_debug("Opening browser '{0}' through remote server at '{1}'"\
                        .format(browser_name, webdriver_remote_url))
        else:
            print_debug("Opening browser '%s'" % (browser_name))
        browser = self._make_browser(browser_name, desired_capabilities,
                                     profile_dir, webdriver_remote_url,
                                     binary=binary, gecko_path=gecko_path)
        print_info("The Selenium Webdriver version is '{0}'".format(webdriver.__version__))
        if browser:
            browser_detail_dict = self.get_browser_details(browser)
            for details, value in browser_detail_dict.items():
                print_info("The Browser '{0}' {1} is '{2}'".format(browser_name, details, value))
        return browser
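
An illustrative call (instance name and paths are placeholders); binary and gecko_path presumably matter only for Firefox, when the browser binary and geckodriver are not on the default PATH:

browser = selenium_ops.open_browser(browser_name="firefox",
                                    binary="/usr/bin/firefox",
                                    gecko_path="/usr/local/bin/geckodriver")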
Example 14
def execute_iterative_sequential(step_list, data_repository, tc_status,
                                 system_list):
    """ Executes all the steps in iterative sequential fashion """

    system_status_list = []
    system_resultfile_list = []

    for system in system_list:
        step_status_list, kw_resultfile_list, step_impact_list = testcase_steps_execution.main(
            step_list, data_repository, system_name=system)
        system_status = Utils.testcase_Utils.compute_status_using_impact(
            step_status_list, step_impact_list)
        system_resultfile = compute_system_resultfile(
            kw_resultfile_list, data_repository['wt_resultsdir'], system)
        system_status_list.append(system_status)
        system_resultfile_list.append(system_resultfile)

    tc_status = Utils.testcase_Utils.compute_status_without_impact(
        system_status_list)
    print_debug("Updating Testcase result file...")
    Utils.testcase_Utils.append_result_files(data_repository['wt_resultfile'],
                                             system_resultfile_list)

    return tc_status
Example 15
def sendPing(hostname, count, fname):
    """Sends a ping command
    :Arguments:
        1. hostname(string) = the destination hostname/ip address to ping.
        2. count(string) = no of pings to be sent.
        3. fname(string) = logfile to log the ping response.
    :Returns:
        status = boolean
    """
    status = False
    command = "ping -c " + count + " " + hostname + " >>" + fname
    print_debug("sendPing, cmd = '%s'" % command)

    response = os.system(command)
    if response == 0:
        print_debug("hostname : '%s' is up " % hostname)
        status = True
    else:
        print_debug("hostname : '%s' is down " % hostname)
    return status
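
The shell command above is built by string concatenation, so a hostname containing shell metacharacters would be interpreted by the shell. A safer sketch of the same check using subprocess (plain print stands in for Warrior's print_debug):

import subprocess

def send_ping(hostname, count, fname):
    """Ping hostname `count` times, appending the output to logfile fname."""
    with open(fname, "a") as logfile:
        response = subprocess.call(["ping", "-c", str(count), hostname],
                                   stdout=logfile, stderr=subprocess.STDOUT)
    if response == 0:
        print("hostname : '%s' is up " % hostname)
        return True
    print("hostname : '%s' is down " % hostname)
    return False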
Example 16
def sendSourcePing(count, src_iface, destip, fname):
    """Sends a source based ping command
    i.e. if multiple interfaces are configured and available,
    sends pings from the src_iface provided
    :Arguments:
        1. count(string) = no of pings to be sent
        2. src_iface(string) = source interface from which ping messages
                                are to be sent.
        3. destip(string) = the destination ip address to ping.
        4. fname = logfile to log ping response.
    :Returns:
        status = boolean
    """
    status = False
    command = "ping -c " + count + " -I " + src_iface + " " + destip + " >>" + fname
    print_debug("command, cmd = '%s'" % command)

    response = os.system(command)
    if response == 0:
        print_debug("hostname : '%s' is up " % destip)
        status = True
    else:
        print_debug("hostname : '%s' is down " % destip)
    return status
Example 17
def execute_testsuite(testsuite_filepath, data_repository, from_project,
                      auto_defects, jiraproj, res_startdir, logs_startdir,
                      ts_onError_action):
    """Executes the testsuite (provided as a xml file)
            - Takes a testsuite xml file as input and
            sends each testcase to Basedriver for execution.
            - Computes the testsuite status based on the
            testcase_status and the impact value of the testcase
            - Handles testcase failures as per the default/specific onError action/value
            - Calls the function to report the testsuite status

    Arguments:
    1. testsuite_filepath = (string) the full path of the testsuite xml file.
    2. data_repository    = (dict) data repository of the execution.
    3. from_project       = (boolean) True if the testsuite is executed as part of a project.
    4. auto_defects       = (boolean) True to automatically create defects for failures.
    5. jiraproj           = (string) name of the jira project to report defects to.
    6. res_startdir       = (string) directory under which the testsuite results directory is created.
    7. logs_startdir      = (string) directory under which the testsuite logs directory is created.
    8. ts_onError_action  = (string) testsuite-level onError action (e.g. abort_as_error).
    """
    suite_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution starts".format(suite_start_time))
    suite_repository = get_suite_details(testsuite_filepath, data_repository,
                                         from_project, res_startdir, logs_startdir)
    testcase_list = get_testcase_list(testsuite_filepath)
    execution_type = suite_repository['suite_exectype'].upper()
    no_of_tests = str(len(testcase_list))

    junit_resultfile = suite_repository['junit_resultfile']
    suite_name = suite_repository['suite_name']
    suite_execution_dir = suite_repository['suite_execution_dir']

    data_repository['wt_suite_execution_dir'] = suite_execution_dir
    data_repository['wt_results_execdir'] = suite_repository['ws_results_execdir']
    data_repository['wt_logs_execdir'] = suite_repository['ws_logs_execdir']
    data_repository['wt_suite_name'] = suite_name

    suite_timestamp = testsuite_utils.get_suite_timestamp()
    data_repository['wt_ts_timestamp'] = suite_timestamp
    suite_repository['wt_ts_timestamp'] = suite_timestamp

    data_repository["suite_exectype"] = "iterative" if execution_type == "ITERATIVE_SEQUENTIAL" or \
    execution_type == "ITERATIVE_PARALLEL" else execution_type

    ts_junit_display = "True"
    pj_junit_display = "False"
    if "wt_junit_object" in data_repository:
        ts_junit_object = data_repository["wt_junit_object"]

    else:
        ts_junit_object = junit_class.Junit(filename=suite_name, timestamp=suite_timestamp,
                                            name="customProject_independant_testcase_execution",
                                            display=pj_junit_display)

        if "jobid" in data_repository:
            ts_junit_object.add_jobid(data_repository["jobid"])
            del data_repository["jobid"]
        data_repository["wt_junit_object"] = ts_junit_object
    suite_repository["wt_junit_object"] = ts_junit_object
    ts_junit_object.create_testsuite(location=os.path.dirname(testsuite_filepath),
                                     name=suite_name, timestamp=suite_timestamp,
                                     suite_location=suite_repository['testsuite_filepath'],
                                     title=suite_repository['suite_title'],
                                     display=ts_junit_display,
                                     **ts_junit_object.init_arg())

    # Adding resultsdir as attributes to testsuite_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of properties tag in testcase
    ts_junit_object.update_attr("resultsdir", suite_repository['suite_execution_dir'],
                                "ts", suite_timestamp)
    ts_junit_object.add_property("resultsdir", suite_repository['suite_execution_dir'],
                                 "ts", suite_timestamp)

    if suite_repository.has_key("data_file"):
        data_repository['suite_data_file'] = suite_repository['data_file']

    # jiraproj name
    data_repository['jiraproj'] = jiraproj

    # if not from_project:
    testsuite_utils.pSuite_root(junit_resultfile)

    testsuite_utils.pSuite_testsuite(junit_resultfile, suite_name,
                                     errors='0', skipped='0',
                                     tests=no_of_tests, failures='0',
                                     time='0', timestamp=suite_timestamp)
    testsuite_utils.pSuite_property(junit_resultfile, 'title', suite_repository['suite_title'])
    testsuite_utils.pSuite_property(junit_resultfile, 'location', testsuite_filepath)
    if "jobid" in data_repository:
        testsuite_utils.pSuite_property(junit_resultfile, 'resultlocation',
                                        data_repository["jobid"])
        # del data_repository["jobid"]

    print_suite_details_to_console(suite_repository, testsuite_filepath, junit_resultfile)


    data_repository["war_parallel"] = False

    if execution_type.upper() == 'PARALLEL_TESTCASES':
        ts_junit_object.remove_html_obj()
        data_repository["war_parallel"] = True
        print_info("Executing testcases in parallel")
        test_suite_status = parallel_testcase_driver.main(testcase_list, suite_repository,
                                                          data_repository, from_project,
                                                          tc_parallel=True,
                                                          auto_defects=auto_defects)

    elif execution_type.upper() == 'SEQUENTIAL_TESTCASES':
        print_info("Executing testccases sequentially")
        test_suite_status = sequential_testcase_driver.main(testcase_list, suite_repository,
                                                            data_repository, from_project,
                                                            auto_defects=auto_defects)

    elif execution_type.upper() == 'RUN_UNTIL_FAIL':
        execution_value = Utils.xml_Utils.getChildAttributebyParentTag(testsuite_filepath,
                                                                       'Details',
                                                                       'type', 'Max_Attempts')
        print_info("Execution type: {0}, Attempts: {1}".format(execution_type, execution_value))
        i = 0
        while i < int(execution_value):
            i += 1
            print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
            test_suite_status = sequential_testcase_driver.main(testcase_list, suite_repository,
                                                                data_repository, from_project,
                                                                auto_defects=auto_defects)
            test_count = i * len(testcase_list)
            testsuite_utils.pSuite_update_suite_tests(str(test_count))
            if str(test_suite_status).upper() == "FALSE" or\
               str(test_suite_status).upper() == "ERROR":
                break

    elif execution_type.upper() == 'RUN_UNTIL_PASS':
        execution_value = Utils.xml_Utils.getChildAttributebyParentTag(testsuite_filepath,
                                                                       'Details',
                                                                       'type', 'Max_Attempts')
        print_info("Execution type: {0}, Attempts: {1}".format(execution_type, execution_value))
        i = 0
        while i < int(execution_value):
            i += 1
            print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
            test_suite_status = sequential_testcase_driver.main(testcase_list, suite_repository,
                                                                data_repository, from_project,
                                                                auto_defects=auto_defects)
            test_count = i * len(testcase_list)
            testsuite_utils.pSuite_update_suite_tests(str(test_count))
            if str(test_suite_status).upper() == "TRUE":
                break

    elif execution_type.upper() == 'RUN_MULTIPLE':
        Max_Attempts = Utils.xml_Utils.getChildAttributebyParentTag(testsuite_filepath, 'Details',
                                                                    'type', 'Max_Attempts')
        Number_Attempts = Utils.xml_Utils.getChildAttributebyParentTag(testsuite_filepath,
                                                                       'Details', 'type',
                                                                       'Number_Attempts')

        if Max_Attempts == "":
            execution_value = Number_Attempts
        else:
            execution_value = Max_Attempts


        print_info("Execution type: {0}, Max Attempts: {1}".format(execution_type, execution_value))

        i = 0
        while i < int(execution_value):
            i += 1
            print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
            # We aren't actually summing each test result here...
            test_suite_status = sequential_testcase_driver.main(testcase_list, suite_repository,
                                                                data_repository,
                                                                from_project,
                                                                auto_defects=auto_defects)

    elif execution_type.upper() == "ITERATIVE_SEQUENTIAL":
		# if execution type is iterative sequential call WarriorCore.Classes.iterative_testsuite
        # class and
		# execute the testcases in iterative sequential fashion on the systems
        print_info("Iterative sequential suite")

        iter_seq_ts_obj = IterativeTestsuite(testcase_list, suite_repository,
                                             data_repository, from_project,
                                             auto_defects)
        test_suite_status = iter_seq_ts_obj.execute_iterative_sequential()

    elif execution_type.upper() == "ITERATIVE_PARALLEL":
		# if execution type is iterative parallel call WarriorCore.Classes.iterative_testsuite
        # class and
		# execute the testcases in iterative parallel fashion on the systems
        ts_junit_object.remove_html_obj()
        print_info("Iterative parallel suite")
        data_repository["war_parallel"] = True
        iter_seq_ts_obj = IterativeTestsuite(testcase_list, suite_repository,
                                             data_repository, from_project, auto_defects)

        test_suite_status = iter_seq_ts_obj.execute_iterative_parallel()

    else:
        print_error("unexpected suite_type received...aborting execution")
        test_suite_status = False

    print_info("\n")
    suite_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution completed".format(suite_end_time))
    suite_duration = Utils.datetime_utils.get_time_delta(suite_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(suite_duration)
    print_info("Testsuite duration= {0}".format(hms))
    testsuite_utils.update_suite_duration(str(suite_duration))
    if test_suite_status is False and ts_onError_action and\
       ts_onError_action.upper() == 'ABORT_AS_ERROR':
        print_info("Testsuite status will be marked as ERROR as onError action is set "
                   "to 'abort_as_error'")
        test_suite_status = "ERROR"
    testsuite_utils.report_testsuite_result(suite_repository, test_suite_status)

    ts_junit_object = data_repository['wt_junit_object']
    ts_junit_object.update_count(test_suite_status, "1", "pj")
    ts_junit_object.update_count("suites", "1", "pj", "not appicable")
    ts_junit_object.update_attr("status", str(test_suite_status), "ts", suite_timestamp)
    ts_junit_object.update_attr("time", str(suite_duration), "ts", suite_timestamp)

    if not from_project:
        ts_junit_object.update_attr("status", str(test_suite_status), "pj", "not applicable")
        ts_junit_object.update_attr("time", str(suite_duration), "pj", "not appicable")
        ts_junit_object.output_junit(data_repository['wt_results_execdir'])

        # Save JUnit/HTML results of the Suite in MongoDB server
        if data_repository.get("db_obj") is not False:
            ts_junit_xml = (data_repository['wt_results_execdir'] + os.sep +
                            ts_junit_object.filename+"_junit.xml")
            data_repository.get("db_obj").add_html_result_to_mongodb(ts_junit_xml)
    else:
        # Create and replace existing Project junit file for each suite
        ts_junit_object.output_junit(data_repository['wp_results_execdir'],
                                     print_summary=False)

    return test_suite_status, suite_repository
Example 18
def execute_step(step,
                 step_num,
                 data_repository,
                 system_name,
                 kw_parallel,
                 queue,
                 skip_invoked=True):
    """ Executes a step from the testcase xml file
        - Parses a step from the testcase xml file
        - Get the values of Driver, Keyword, impactsTcResult
        - If the step has arguments, get all the arguments and store them as key/value pairs in
          args_repository
        - Sends the Keyword, data_repository, args_repository to the respective Driver.
        - Reports the status of the keyword executed (obtained as return value from the respective
          Driver)

    Arguments:
    1. step            = (xml element) xml element with tag <step> containing the details of the
                         step to be executed like (Driver, Keyword, Arguments, Impact etc..)
    2. step_num        = (int) step number being executed
    3. data_repository = (dict) data_repository of the testcase
    4. system_name     = (string) name of the system on which the step is executed
    5. kw_parallel     = (boolean) whether the keywords are executed in parallel
    6. queue           = (multiprocessing queue) holds the step results during parallel execution
    7. skip_invoked    = (boolean) when True, honor the abort_as_error onError action
    """

    tc_junit_object = data_repository['wt_junit_object']
    driver = step.get('Driver')
    plugin = step.get('Plugin')
    keyword = step.get('Keyword')
    context = Utils.testcase_Utils.get_context_from_xmlfile(step)
    step_impact = Utils.testcase_Utils.get_impact_from_xmlfile(step)
    step_description = Utils.testcase_Utils.get_description_from_xmlfile(step)
    parallel = kw_parallel

    if parallel is True:
        step_console_log = get_step_console_log(
            data_repository['wt_filename'], data_repository['wt_logsdir'],
            'step-{0}_{1}_consoleLogs'.format(step_num, keyword))

    data_repository['step_num'] = step_num
    data_repository['wt_driver'] = driver
    data_repository['wt_plugin'] = plugin
    data_repository['wt_keyword'] = keyword
    data_repository['wt_step_impact'] = step_impact
    data_repository['wt_step_context'] = context
    data_repository['wt_step_description'] = step_description

    kw_resultfile = get_keyword_resultfile(data_repository, system_name,
                                           step_num, keyword)
    Utils.config_Utils.set_resultfile(kw_resultfile)
    # print the start of runmode execution
    if step.find("runmode") is not None and \
       step.find("runmode").get("attempt") is not None:
        if step.find("runmode").get("attempt") == 1:
            print_info(
                "\n----------------- Start of Step Runmode Execution -----------------\n"
            )
        print_info("KEYWORD ATTEMPT: {0}".format(
            step.find("runmode").get("attempt")))
    # print keyword to result file
    Utils.testcase_Utils.pKeyword(keyword, driver)
    print_info("step number: {0}".format(step_num))
    print_info("Teststep Description: {0}".format(step_description))

    if step.find("retry") is not None and step.find("retry").get(
            "attempt") is not None:
        print_info("KEYWORD ATTEMPT: {0}".format(
            step.find("retry").get("attempt")))
    kw_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Keyword execution starts".format(kw_start_time))
    # get argument list provided by user
    args_repository = get_arguments(step)
    if system_name is not None:
        args_repository['system_name'] = system_name
    Utils.testcase_Utils.update_arguments(args_repository)
    Utils.testcase_Utils.update_kw_resultfile(kw_resultfile)

    # Executing keyword
    send_keyword_to_productdriver(driver, plugin, keyword, data_repository,
                                  args_repository)
    keyword_status = data_repository['step-%s_status' % step_num]
    Utils.testcase_Utils.update_step_num(str(step_num))
    if context.upper() == 'NEGATIVE' and isinstance(keyword_status, bool):
        print_debug(
            "Keyword status = {0}, Flip status as context is Negative".format(
                keyword_status))
        keyword_status = not keyword_status
    if WarriorCliClass.mock and (keyword_status is True
                                 or keyword_status is False):
        keyword_status = "RAN"

    # Getting onError action
    # Insert rules else statement here
    print_info("")
    print_info("*** Keyword status ***")
    step_goto_value = False
    step_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
        step, 'onError', 'action')
    if step_onError_action is not False:
        if step_onError_action.upper() == 'GOTO':
            step_goto_value = Utils.xml_Utils.get_attributevalue_from_directchildnode(
                step, 'onError', 'value')
    testcase_error_action = data_repository['wt_def_on_error_action']
    step_onError_action = step_onError_action if step_onError_action else testcase_error_action
    if step_onError_action.upper() == "GOTO" and step_goto_value is False:
        step_goto_value = data_repository['wt_def_on_error_value']
    onerror = step_onError_action.upper()
    if step_goto_value is not False and step_goto_value is not None:
        onerror = onerror + " step " + step_goto_value
    if keyword_status is False and step_onError_action and \
            step_onError_action.upper() == 'ABORT_AS_ERROR' and skip_invoked:
        print_info(
            "Keyword status will be marked as ERROR as onError action is set "
            "to 'abort_as_error'")
        keyword_status = "ERROR"
    Utils.testcase_Utils.reportKeywordStatus(keyword_status, keyword)
    print_info("step number: {0}".format(step_num))

    # Reporting status to data repo
    string_status = {
        "TRUE": "PASS",
        "FALSE": "FAIL",
        "ERROR": "ERROR",
        "EXCEPTION": "EXCEPTION",
        "SKIP": "SKIP",
        "RAN": "RAN"
    }

    if str(keyword_status).upper() in string_status.keys():
        data_repository['step_%s_result' %
                        step_num] = string_status[str(keyword_status).upper()]
    else:
        print_error("unexpected step status, default to exception")
        data_repository['step_%s_result' % step_num] = "EXCEPTION"

    # Addressing impact
    if step_impact.upper() == 'IMPACT':
        msg = "Status of the executed step  impacts TC result"
        if str(keyword_status).upper() == 'SKIP':
            keyword_status = None
        # elif exec_type_onerror is False and str(keyword_status).upper() ==
        # 'SKIP':
    elif step_impact.upper() == 'NOIMPACT':
        msg = "Status of the executed step does not impact TC result"
    Utils.testcase_Utils.pNote_level(msg, "debug", "kw")
    if ('step-%s_exception' % step_num) in data_repository:
        msg = "Exception message: " + \
            data_repository['step-%s_exception' % step_num]
        Utils.testcase_Utils.pNote_level(msg, "debug", "kw", ptc=False)

    print_info("")
    kw_end_time = Utils.datetime_utils.get_current_timestamp()
    kw_duration = Utils.datetime_utils.get_time_delta(kw_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(kw_duration)
    print_info("Keyword duration= {0}".format(hms))
    print_info("[{0}] Keyword execution completed".format(kw_end_time))
    # condition to  print the end of runmode execution when all the attempts finish
    if step.find("runmode") is not None and \
       step.find("runmode").get("attempt") is not None:
        if step.find("runmode").get("attempt") == \
           step.find("runmode").get("runmode_val"):
            print_info(
                "\n----------------- End of Step Runmode Execution -----------------\n"
            )

    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    tc_timestamp = data_repository['wt_tc_timestamp']
    impact = impact_dict.get(step_impact.upper())
    tc_resultsdir = data_repository['wt_resultsdir']
    tc_name = data_repository['wt_name']
    #to append keyword name with Setup/Cleanup in testcase report
    if data_repository['wt_step_type'] != 'step':
        keyword = data_repository['wt_step_type'] + "--" + keyword
    add_keyword_result(tc_junit_object,
                       tc_timestamp,
                       step_num,
                       keyword,
                       keyword_status,
                       kw_start_time,
                       kw_duration,
                       kw_resultfile,
                       impact,
                       onerror,
                       step_description,
                       info=str(args_repository),
                       tc_name=tc_name,
                       tc_resultsdir=tc_resultsdir)

    if parallel is True:
        # put result into multiprocessing queue and later retrieve in
        # corresponding driver
        queue.put((keyword_status, kw_resultfile, step_impact.upper(),
                   tc_junit_object))
    elif not data_repository['war_parallel']:
        # Get the type of the file being executed by Warrior: Case/Suite/Project
        war_file_type = data_repository.get('war_file_type')
        if war_file_type == "Case":
            # Create and replace existing Case junit file for each step
            tc_junit_object.output_junit(data_repository['wt_resultsdir'],
                                         print_summary=False)
        elif war_file_type == "Suite":
            # Create and replace existing Suite junit file for each step
            tc_junit_object.output_junit(data_repository['wt_results_execdir'],
                                         print_summary=False)
        elif war_file_type == "Project":
            # Create and replace existing Project junit file for each step
            tc_junit_object.output_junit(data_repository['wp_results_execdir'],
                                         print_summary=False)
    return keyword_status, kw_resultfile, step_impact
def execute_testcase(testcase_filepath,
                     data_repository,
                     tc_context,
                     runtype,
                     tc_parallel,
                     queue,
                     auto_defects,
                     suite,
                     jiraproj,
                     tc_onError_action,
                     iter_ts_sys,
                     steps_tag="Steps"):
    """ Executes the testcase (provided as a xml file)
            - Takes a testcase xml file as input and executes each command in the testcase.
            - Computes the testcase status based on the stepstatus and the impact value of the step
            - Handles step failures as per the default/specific onError action/value
            - Calls the function to report the testcase status

    :Arguments:
        1. testcase_filepath (string) = the full path of the testcase xml file
        2. execution_dir (string) = the full path of the directory under which the
                                    testcase execution directory will be created
                                    (the results, logs for this testcase will be
                                    stored in this testcase execution directory.)
    """

    tc_status = True
    tc_start_time = Utils.datetime_utils.get_current_timestamp()
    tc_timestamp = str(tc_start_time)
    print_info("[{0}] Testcase execution starts".format(tc_start_time))

    get_testcase_details(testcase_filepath, data_repository, jiraproj)
    #get testwrapperfile details like testwrapperfile, data_type and runtype
    testwrapperfile, j_data_type, j_runtype, setup_on_error_action = \
        get_testwrapper_file_details(testcase_filepath, data_repository)
    data_repository['wt_testwrapperfile'] = testwrapperfile
    isRobotWrapperCase = check_robot_wrapper_case(testcase_filepath)

    # These lines are for creating testcase junit file
    from_ts = False
    pj_junit_display = 'False'
    if 'wt_junit_object' not in data_repository:
        # not from testsuite
        tc_junit_object = junit_class.Junit(
            filename=data_repository['wt_name'],
            timestamp=tc_timestamp,
            name="customProject_independant_testcase_execution",
            display=pj_junit_display)
        if "jobid" in data_repository:
            tc_junit_object.add_jobid(data_repository["jobid"])
            del data_repository["jobid"]
        tc_junit_object.create_testcase(
            location=data_repository['wt_filedir'],
            timestamp=tc_timestamp,
            ts_timestamp=tc_timestamp,
            name=data_repository['wt_name'],
            testcasefile_path=data_repository['wt_testcase_filepath'])
        junit_requirements(testcase_filepath, tc_junit_object, tc_timestamp)
        data_repository['wt_ts_timestamp'] = tc_timestamp
    else:
        tag = "testcase" if steps_tag == "Steps" else steps_tag
        tc_junit_object = data_repository['wt_junit_object']
        #creates testcase based on tag given Setup/Steps/Cleanup
        tc_junit_object.create_testcase(
            location="from testsuite",
            timestamp=tc_timestamp,
            ts_timestamp=data_repository['wt_ts_timestamp'],
            classname=data_repository['wt_suite_name'],
            name=data_repository['wt_name'],
            tag=tag,
            testcasefile_path=data_repository['wt_testcase_filepath'])
        from_ts = True
        junit_requirements(testcase_filepath, tc_junit_object,
                           data_repository['wt_ts_timestamp'])
    data_repository['wt_tc_timestamp'] = tc_timestamp
    data_repository['tc_parallel'] = tc_parallel
    data_type = data_repository['wt_data_type']
    if not from_ts:
        data_repository["war_parallel"] = False

    # Adding resultsdir, logsdir, title as attributes to testcase_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of properties tag in testcase
    tc_junit_object.add_property(
        "resultsdir", os.path.dirname(data_repository['wt_resultsdir']), "tc",
        tc_timestamp)
    tc_junit_object.update_attr("title", data_repository['wt_title'], "tc",
                                tc_timestamp)

    data_repository['wt_junit_object'] = tc_junit_object
    print_testcase_details_to_console(testcase_filepath, data_repository,
                                      steps_tag)
    # Prints the path of result summary file at the beginning of execution
    if data_repository['war_file_type'] == "Case":
        filename = os.path.basename(testcase_filepath)
        html_filepath = os.path.join(
            data_repository['wt_resultsdir'],
            Utils.file_Utils.getNameOnly(filename)) + '.html'
        print_info("HTML result file: {0}".format(html_filepath))
    #get the list of steps in the given tag - Setup/Steps/Cleanup
    step_list = common_execution_utils.get_step_list(testcase_filepath,
                                                     steps_tag, "step")
    if not step_list:
        print_warning("Warning! cannot get steps for execution")
        tc_status = "ERROR"

    if step_list and not len(step_list):
        print_warning("step list is empty in {0} block".format(steps_tag))

    tc_state = Utils.xml_Utils.getChildTextbyParentTag(testcase_filepath,
                                                       'Details', 'State')
    if tc_state is not False and tc_state is not None and \
       tc_state.upper() == "DRAFT":
        print_warning("Testcase is in 'Draft' state, it may have keywords "
                      "that have not been developed yet. Skipping the "
                      "testcase execution and it will be marked as 'ERROR'")
        tc_status = "ERROR"
    elif isRobotWrapperCase is True and from_ts is False:
        print_warning("Case which has robot_wrapper steps should be executed "
                      "as part of a Suite. Skipping the case execution and "
                      "it will be marked as 'ERROR'")
        tc_status = "ERROR"
    elif step_list:
        setup_tc_status, cleanup_tc_status = True, True
        #1.execute setup steps if testwrapperfile is present in testcase
        #and not from testsuite execution
        #2.execute setup steps if testwrapperfile is present in testcase
        #and from testsuite execution and testwrapperfile is not defined in test suite.
        if (testwrapperfile and not from_ts) or (testwrapperfile and \
            from_ts and 'suite_testwrapper_file' not in data_repository):
            setup_step_list = common_execution_utils.get_step_list(
                testwrapperfile, "Setup", "step")
            if not len(setup_step_list):
                print_warning(
                    "step list is empty in {0} block".format("Setup"))

            print_info("****** SETUP STEPS EXECUTION STARTS *******")
            data_repository['wt_step_type'] = 'setup'
            #to consider relative paths provided from wrapperfile instead of testcase file
            original_tc_filepath = data_repository['wt_testcase_filepath']
            data_repository['wt_testcase_filepath'] = testwrapperfile
            setup_tc_status = execute_steps(j_data_type, j_runtype, \
                data_repository, setup_step_list, tc_junit_object, iter_ts_sys)
            #reset to original testcase filepath
            data_repository['wt_testcase_filepath'] = original_tc_filepath
            data_repository['wt_step_type'] = 'step'
            print_info("setup_tc_status : {0}".format(setup_tc_status))
            print_info("****** SETUP STEPS EXECUTION ENDS *******")

        if setup_on_error_action == 'next' or \
            (setup_on_error_action == 'abort' \
            and isinstance(setup_tc_status, bool) and setup_tc_status):
            if steps_tag == "Steps":
                print_info("****** TEST STEPS EXECUTION STARTS *******")
            data_repository['wt_step_type'] = 'step'
            tc_status = execute_steps(data_type, runtype, \
                data_repository, step_list, tc_junit_object, iter_ts_sys)
            if steps_tag == "Steps":
                print_info("****** TEST STEPS EXECUTION ENDS *******")
        else:
            print_error("Test steps are not executed as setup steps failed to execute,"\
                        "setup status : {0}".format(setup_tc_status))
            print_error("Steps in cleanup will be executed on besteffort")
            tc_status = "ERROR"

        #1.execute cleanup steps if testwrapperfile is present in testcase
        #and not from testsuite execution
        #2.execute cleanup steps if testwrapperfile is present in testcase
        #and from testsuite execution and testwrapperfile is not defined in test suite.
        if (testwrapperfile and not from_ts) or (testwrapperfile and \
            from_ts and 'suite_testwrapper_file' not in data_repository):
            cleanup_step_list = common_execution_utils.get_step_list(
                testwrapperfile, "Cleanup", "step")
            if not len(cleanup_step_list):
                print_warning(
                    "step list is empty in {0} block".format("Cleanup"))
            print_info("****** CLEANUP STEPS EXECUTION STARTS *******")
            data_repository['wt_step_type'] = 'cleanup'
            original_tc_filepath = data_repository['wt_testcase_filepath']
            #to consider relative paths provided from wrapperfile instead of testcase file
            data_repository['wt_testcase_filepath'] = testwrapperfile
            cleanup_tc_status = execute_steps(j_data_type, j_runtype, \
                data_repository, cleanup_step_list, tc_junit_object, iter_ts_sys)
            #reset to original testcase filepath
            data_repository['wt_testcase_filepath'] = original_tc_filepath
            data_repository['wt_step_type'] = 'step'
            print_info("cleanup_tc_status : {0}".format(cleanup_tc_status))
            print_info("****** CLEANUP STEPS EXECUTION ENDS *******")

    if tc_context.upper() == 'NEGATIVE':
        if all([tc_status != 'EXCEPTION', tc_status != 'ERROR']):
            print_debug(
                "Test case status is: '{0}', flip status as context is "
                "negative".format(tc_status))
            tc_status = not tc_status
    if step_list and isinstance(tc_status, bool) and isinstance(cleanup_tc_status, bool) \
        and tc_status and cleanup_tc_status:
        tc_status = True
    #set tc status to WARN if only cleanup fails
    elif step_list and isinstance(
            tc_status, bool) and tc_status and cleanup_tc_status != True:
        print_warning("setting tc status to WARN as cleanup failed")
        tc_status = "WARN"

    if step_list and tc_status is False and tc_onError_action and \
            tc_onError_action.upper() == 'ABORT_AS_ERROR':
        print_info("Testcase status will be marked as ERROR as onError "
                   "action is set to 'abort_as_error'")
        tc_status = "ERROR"

    defectsdir = data_repository['wt_defectsdir']
    check_and_create_defects(tc_status, auto_defects, data_repository,
                             tc_junit_object)

    print_info("\n")
    tc_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testcase execution completed".format(tc_end_time))
    tc_duration = Utils.datetime_utils.get_time_delta(tc_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(tc_duration)
    print_info("Testcase duration= {0}".format(hms))

    tc_junit_object.update_count(tc_status, "1", "ts",
                                 data_repository['wt_ts_timestamp'])
    tc_junit_object.update_count("tests", "1", "ts",
                                 data_repository['wt_ts_timestamp'])
    tc_junit_object.update_count("tests", "1", "pj", "not appicable")
    tc_junit_object.update_attr("status", str(tc_status), "tc", tc_timestamp)
    tc_junit_object.update_attr("time", str(tc_duration), "tc", tc_timestamp)
    tc_junit_object.add_testcase_message(tc_timestamp, tc_status)
    if str(tc_status).upper() in ["FALSE", "ERROR", "EXCEPTION"]:
        tc_junit_object.update_attr("defects", defectsdir, "tc", tc_timestamp)

    # Adding resultsdir, logsdir, title as attributes to testcase_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of properties tag in testcase
    tc_junit_object.update_attr(
        "resultsdir", os.path.dirname(data_repository['wt_resultsdir']), "tc",
        tc_timestamp)
    tc_junit_object.update_attr("logsdir",
                                os.path.dirname(data_repository['wt_logsdir']),
                                "tc", tc_timestamp)
    data_file = data_repository["wt_datafile"]
    system_name = ""
    try:
        tree = et.parse(data_file)
        for elem in tree.iter():
            if elem.tag == "system":
                for value in elem.attrib.values():
                    if value == "kafka_producer":
                        system_name = elem.get("name")
                        break
    except Exception:
        # the data file may be missing or malformed; kafka reporting is optional
        pass
    if system_name:
        junit_file_obj = data_repository['wt_junit_object']
        root = junit_file_obj.root
        suite_details = root.findall("testsuite")[0]
        test_case_details = suite_details.findall("testcase")[0]
        print_info("kafka server is presented in Inputdata file..")
        system_details = _get_system_or_subsystem(data_file, system_name)
        data = {}
        for item in system_details:
            if item.tag == "kafka_port":
                kafka_port = item.text
                continue
            if item.tag == "ip":
                ip_address = item.text
                continue
            try:
                value = ast.literal_eval(item.text)
            except ValueError:
                value = item.text
            data.update({item.tag: value})
        ip_port = ["{}:{}".format(ip_address, kafka_port)]
        data.update({"bootstrap_servers": ip_port})
        data.update({"value_serializer": lambda x: dumps(x).encode('utf-8')})
        try:
            producer = WarriorKafkaProducer(**data)
            producer.send_messages('warrior_results', suite_details.items())
            producer.send_messages('warrior_results',
                                   test_case_details.items())
            print_info("message published to topic: warrior_results {}".format(
                suite_details.items()))
            print_info("message published to topic: warrior_results {}".format(
                test_case_details.items()))
        except Exception:
            print_warning("Unable to connect to kafka server !!")
    report_testcase_result(tc_status, data_repository, tag=steps_tag)
    if not from_ts:
        tc_junit_object.update_count(tc_status, "1", "pj", "not applicable")
        tc_junit_object.update_count("suites", "1", "pj", "not applicable")
        tc_junit_object.update_attr("status", str(tc_status), "ts",
                                    data_repository['wt_ts_timestamp'])
        tc_junit_object.update_attr("status", str(tc_status), "pj",
                                    "not applicable")
        tc_junit_object.update_attr("time", str(tc_duration), "ts",
                                    data_repository['wt_ts_timestamp'])
        tc_junit_object.update_attr("time", str(tc_duration), "pj",
                                    "not applicable")

        tc_junit_object.output_junit(data_repository['wt_resultsdir'])

        # Save JUnit/HTML results of the Case in MongoDB server
        if data_repository.get("db_obj") is not False:
            tc_junit_xml = data_repository['wt_resultsdir'] + os.sep +\
                tc_junit_object.filename + "_junit.xml"
            data_repository.get("db_obj").add_html_result_to_mongodb(
                tc_junit_xml)
    else:
        # send an email on TC failure (no need to send an email here when
        # executing a single case).
        if str(tc_status).upper() in ["FALSE", "ERROR", "EXCEPTION"]:
            email_setting = None
            # for first TC failure
            if "any_failures" not in data_repository:
                email_params = email.get_email_params("first_failure")
                if all(value != "" for value in email_params[:3]):
                    email_setting = "first_failure"
                data_repository['any_failures'] = True
            # for further TC failures
            if email_setting is None:
                email_params = email.get_email_params("every_failure")
                if all(value != "" for value in email_params[:3]):
                    email_setting = "every_failure"

            if email_setting is not None:
                email.compose_send_email("Test Case: ", data_repository[\
                 'wt_testcase_filepath'], data_repository['wt_logsdir'],\
                 data_repository['wt_resultsdir'], tc_status, email_setting)

        if not tc_parallel and not data_repository["war_parallel"]:
            if 'wp_results_execdir' in data_repository:
                # Create and replace existing Project junit file for each case
                tc_junit_object.output_junit(
                    data_repository['wp_results_execdir'], print_summary=False)
            else:
                # Create and replace existing Suite junit file for each case
                tc_junit_object.output_junit(
                    data_repository['wt_results_execdir'], print_summary=False)

    if tc_parallel:
        tc_impact = data_repository['wt_tc_impact']
        if tc_impact.upper() == 'IMPACT':
            msg = "Status of the executed test case impacts Testsuite result"
        elif tc_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test case does not impact Teststuie result"
        print_debug(msg)
        tc_name = Utils.file_Utils.getFileName(testcase_filepath)
        # put result into multiprocessing queue and later retrieve in corresponding driver
        queue.put(
            (tc_status, tc_name, tc_impact, tc_duration, tc_junit_object))

    # Save XML results of the Case in MongoDB server
    if data_repository.get("db_obj") is not False:
        data_repository.get("db_obj").add_xml_result_to_mongodb(
            data_repository['wt_resultfile'])

    # the caller (main) needs tc_status and data_repository to unpack
    return tc_status, data_repository
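
When tc_parallel is True, the tuple placed on the multiprocessing queue above is drained by the calling driver only after every child process has been joined. A minimal, self-contained sketch of that round-trip using only the standard library (worker and drain are illustrative names, not Warrior helpers):

from multiprocessing import Process, Queue

def worker(tc_name, output_q):
    # each child computes its result tuple and puts it on the shared queue
    output_q.put((True, tc_name, "impact", 1.2, None))

def drain(output_q):
    # called after join(); fine for small payloads, but large payloads
    # should be drained before joining to avoid blocking the children
    results = []
    while not output_q.empty():
        results.append(output_q.get())
    return results

if __name__ == "__main__":
    q = Queue()
    jobs = [Process(target=worker, args=("tc_{}".format(i), q)) for i in range(3)]
    for job in jobs:
        job.start()
    for job in jobs:
        job.join()
    print(drain(q))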
Example No. 20
0
    def send_testdata_cmds(self, testdatafile, **args):
        """
        - Parses the testdata file and
          - gets the command details for rows marked execute=yes (or)
          - gets the command details for rows marked execute=yes and row=str_rownum (or)
          - gets the command details for rows marked execute=yes and title=strtitle
        - Sends the obtained commands to the paramiko session.
        - If the commands have verification attribute set,
        then verifies the verification text for presence/absence as defined
        in the respective found attribute in the testdata file.

        :Arguments:
            1. testdatafile = the xml file where command details are available
            2. str_rownum = row number of testdata command block to be searched for
               in the testdata file
            3. strtitle = title of testdata command block title to be searched
               for in the testdata file
        :Returns:
            1. finalresult = boolean
        """
        finalresult = True
        varconfigfile = args.get('varconfigfile', None)
        title = args.get('title', None)
        row = args.get('row', None)
        details_dict = data_Utils.get_command_details_from_testdata(
            testdatafile, varconfigfile, title=title, row=row)
        command_list = details_dict["command_list"]
        stepdesc = "Send the following commands: %s" % command_list
        testcase_Utils.pNote(stepdesc)
        if command_list is False:
            finalresult = False
            command_list = []
        intsize = len(command_list)
        # Send Commands
        for i in range(0, intsize):
            command = details_dict["command_list"][i]
            startprompt = details_dict["startprompt_list"][i]
            endprompt = details_dict["endprompt_list"][i]
            cmd_timeout = details_dict["timeout_list"][i]
            retry = details_dict["retry_list"][i]
            retry_timer = details_dict["retry_timer_list"][i]
            retry_onmatch = details_dict["retry_onmatch_list"][i]
            retry_count = details_dict["retry_count_list"][i]
            sleeptime = details_dict["sleeptime_list"][i]
            verify_text_list = details_dict["verify_text_list"][i]
            verify_context_list = details_dict["verify_context_list"][i]
            # normalize sleeptime: None / "" / "none" mean no sleep,
            # anything else is treated as a number of seconds
            sleep = {
                None: 0,
                "": 0,
                "none": 0
            }.get(str(sleeptime).lower(), str(sleeptime))
            sleep = int(sleep)
            print("\n")
            print_debug(">>>")
            testcase_Utils.pNote("Command #{0}\t: {1}".format(
                str(i + 1), command))
            testcase_Utils.pNote("startprompt\t: {0}".format(startprompt))
            testcase_Utils.pNote("endprompt\t: {0}".format(endprompt))
            testcase_Utils.pNote("sleeptime\t: {0}".format(sleep))
            result, response = self.get_response(command)
            if result and result != 'ERROR':
                if verify_text_list is not None:
                    result = data_Utils.verify_cmd_response(
                        verify_text_list, verify_context_list, command,
                        response, varconfigfile)
            command_status = {
                True: "Pass",
                False: "Fail",
                "ERROR": "ERROR"
            }.get(result)
            print_info("Command status: {0}".format(command_status))
            print_debug("<<<")
            if sleep > 0:
                testcase_Utils.pNote(
                    "Sleep time of '{0} seconds' requested post command execution"
                    .format(sleep))
                time.sleep(sleep)
            if result == "ERROR" or finalresult == "ERROR":
                result = "ERROR"
                finalresult = "ERROR"
            finalresult = finalresult and result
        return finalresult
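
The loop above folds each command's result into finalresult so that a single 'ERROR' makes the whole keyword 'ERROR', while plain booleans are ANDed together. The same tri-state folding, restructured as a standalone sketch (illustrative only):

def fold_results(results):
    """AND boolean results together; any 'ERROR' makes the total 'ERROR'."""
    finalresult = True
    for result in results:
        if result == "ERROR" or finalresult == "ERROR":
            finalresult = "ERROR"
        else:
            finalresult = finalresult and result
    return finalresult

assert fold_results([True, True]) is True
assert fold_results([True, False, True]) is False
assert fold_results([True, "ERROR", False]) == "ERROR"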
Example No. 21
0
def execute_parallel_testcases(testcase_list,
                               suite_repository,
                               data_repository,
                               from_project,
                               tc_parallel=True,
                               auto_defects=False,
                               iter_ts_sys=None):
    """Takes a list of testcase as input and executes them in parallel by
    creating separate process of testcase_driver for each of these testcase """

    jobs_list = []
    output_q = None
    suite = suite_repository['suite_name']
    testsuite_filepath = suite_repository['testsuite_filepath']
    suite_error_action = suite_repository['def_on_error_action']
    jiraproj = data_repository["jiraproj"]
    testsuite_dir = os.path.dirname(testsuite_filepath)

    for testcase in testcase_list:
        target_module = testcase_driver.main
        tc_rel_path = testsuite_utils.get_path_from_xmlfile(testcase)
        if tc_rel_path is not None:
            tc_path = Utils.file_Utils.getAbsPath(tc_rel_path, testsuite_dir)
        else:
            tc_path = str(tc_rel_path)
        tc_runtype = testsuite_utils.get_runtype_from_xmlfile(testcase)
        tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(testcase)
        tc_context = Utils.testcase_Utils.get_context_from_xmlfile(testcase)
        suite_step_data_file = testsuite_utils.get_data_file_at_suite_step(
            testcase, suite_repository)
        tc_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
            testcase, 'onError', 'action')
        tc_onError_action = tc_onError_action if tc_onError_action else suite_error_action
        if suite_step_data_file is not None:
            data_file = Utils.file_Utils.getAbsPath(suite_step_data_file,
                                                    testsuite_dir)
            data_repository[tc_path] = data_file

        data_repository['wt_tc_impact'] = tc_impact

        # instead of using args_list, we need to use an ordered dict
        # for tc args because initially the queue will be None and
        # we need to change it after creating a new queue,
        # while maintaining the position of the arguments
        # before calling the testcase driver main function.

        tc_args_dict = OrderedDict([("tc_path", tc_path),
                                    ("data_repository", data_repository),
                                    ("tc_context", tc_context),
                                    ("tc_runtype", tc_runtype),
                                    ("tc_parallel", tc_parallel),
                                    ("auto_defects", auto_defects),
                                    ("suite", suite),
                                    ("tc_onError_action", tc_onError_action),
                                    ("iter_ts_sys", iter_ts_sys),
                                    ("output_q", output_q),
                                    ("jiraproj", jiraproj)])

        process, jobs_list, output_q = create_and_start_process_with_queue(
            target_module, tc_args_dict, jobs_list, output_q)

    print_debug("process: {0}".format(process))
    for job in jobs_list:
        job.join()

    result_list = get_results_from_queue(output_q)

    tc_status_list = []
    tc_name_list = []
    tc_impact_list = []
    tc_duration_list = []
    # Get the junit object of each testcase, extract the information from it and combine with testsuite junit object
    tc_junit_list = []

    for result in result_list:
        tc_status_list.append(result[0])
        tc_name_list.append(result[1])
        tc_impact_list.append(result[2])
        tc_duration_list.append(result[3])
        tc_junit_list.append(result[4])

    # parallel testcases generate multiple testcase junit result files;
    # each file logs the result for one testcase and is not integrated.
    # update testsuite junit result file with individual testcase result files
    data_repository['wt_junit_object'] = update_ts_junit_resultfile(
        data_repository['wt_junit_object'], tc_junit_list)
    testsuite_status = Utils.testcase_Utils.compute_status_using_impact(
        tc_status_list, tc_impact_list)
    return testsuite_status
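
The OrderedDict built above only works because the process helper hands the dict's values to the target positionally. A plausible sketch of create_and_start_process_with_queue under that assumption (the real helper ships with Warrior; this is an illustration, not its verbatim source):

from multiprocessing import Process, Queue

def create_and_start_process_with_queue(target_module, args_dict,
                                        jobs_list, output_q):
    # create the shared results queue lazily on the first call
    if output_q is None:
        output_q = Queue()
    # overwrite the placeholder so every child shares the same queue
    args_dict["output_q"] = output_q
    # an OrderedDict keeps the values aligned with target_module's
    # positional parameters
    process = Process(target=target_module, args=tuple(args_dict.values()))
    process.start()
    jobs_list.append(process)
    return process, jobs_list, output_q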
def execute_sequential_testcases(testcase_list, suite_repository,
                                 data_repository, from_project, auto_defects,
                                 iter_ts_sys, tc_parallel, queue, ts_iter):
    """Executes the list of cases(of a suite) in sequential order
        - Takes a testcase_list as input and sends
        each case to Basedriver for execution.
        - Computes the suite status based on the case_status
        and the impact value of the case
        - Handles case failures as per the default/specific
        onError action/value
        - Calls the function to report the suite status

    :Arguments:
        1. testcase_list(list) = List of cases to be executed
        2. suite_repository(dict) = suite repository
        3. data_repository(dict) = Warrior data repository
        4. from_project(boolean) = True for Project execution else False
        5. auto_defects(boolean) = True for Jira auto defect creation else False
        6. iter_ts_sys(string) = System for iterative execution
        7. tc_parallel(boolean) = True for Parallel execution else False
        8. queue = Python multiprocessing queue for parallel execution
        9. ts_iter(boolean) = True for 'iterative_parallel' execution else False
    :Returns:
        1. suite_status - overall suite status

    """
    goto_tc = False

    junit_resultfile = suite_repository['junit_resultfile']
    suite_name = suite_repository['suite_name']
    testsuite_filepath = suite_repository['testsuite_filepath']
    suite_error_action = suite_repository['def_on_error_action']
    suite_error_value = suite_repository['def_on_error_value']
    testsuite_dir = os.path.dirname(testsuite_filepath)

    errors = 0
    skipped = 0
    failures = 0
    tests = 0
    tc_duration = 0
    tc_status_list = []
    tc_impact_list = []
    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    tc_duration_list = []
    tc_junit_list = []

    while tests < len(testcase_list):
        testcase = testcase_list[tests]
        tests += 1

        tc_rel_path = testsuite_utils.get_path_from_xmlfile(testcase)
        if tc_rel_path is not None:
            tc_path = Utils.file_Utils.getAbsPath(tc_rel_path, testsuite_dir)
        else:
            # tc_rel_path is None: keep the literal string "None" so the
            # missing-file error path below reports it
            tc_path = str(tc_rel_path)
        print_info('\n')
        print_debug("<<<< Starting execution of Test case: {0}>>>>".
                    format(tc_path))
        action, tc_status = exec_type_driver.main(testcase)
        tc_runtype = testsuite_utils.get_runtype_from_xmlfile(testcase)
        tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(testcase)
        tc_context = Utils.testcase_Utils.get_context_from_xmlfile(testcase)
        suite_step_data_file = testsuite_utils.get_data_file_at_suite_step(
                                                testcase, suite_repository)
        tc_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
                                            testcase, 'onError', 'action')
        tc_onError_action = tc_onError_action if tc_onError_action else suite_error_action
        if suite_step_data_file is not None:
            data_file = Utils.file_Utils.getAbsPath(suite_step_data_file,
                                                    testsuite_dir)
            data_repository[tc_path] = data_file
        data_repository['wt_tc_impact'] = tc_impact
        if testcase.find("runmode") is not None and \
           testcase.find("runmode").get("attempt") is not None:
            print_info("testcase attempt: {0}".format(
                                testcase.find("runmode").get("attempt")))
        if testcase.find("retry") is not None and \
           testcase.find("retry").get("attempt") is not None:
            print_info("testcase attempt: {0}".format(
                                testcase.find("retry").get("attempt")))

        if Utils.file_Utils.fileExists(tc_path):
            tc_name = Utils.file_Utils.getFileName(tc_path)
            testsuite_utils.pSuite_testcase(junit_resultfile, suite_name,
                                            tc_name, time='0')

            if not goto_tc and action is True:
                try:
                    tc_result = testcase_driver.main(tc_path,
                                                     data_repository,
                                                     tc_context,
                                                     runtype=tc_runtype,
                                                     auto_defects=auto_defects,
                                                     suite=suite_name,
                                                     tc_onError_action=tc_onError_action,
                                                     iter_ts_sys=iter_ts_sys)

                    tc_status = tc_result[0]
                    tc_duration = tc_result[1]
                except Exception:
                    print_error('unexpected error {0}'.format(
                                                    traceback.format_exc()))
                    tc_status, tc_duration = False, False
                    tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
                                                                    testcase)

            elif goto_tc and goto_tc == str(tests) and action is True:

                try:
                    tc_result = testcase_driver.main(tc_path,
                                                     data_repository,
                                                     tc_context,
                                                     runtype=tc_runtype,
                                                     auto_defects=auto_defects,
                                                     suite=suite_name,
                                                     tc_onError_action=tc_onError_action,
                                                     iter_ts_sys=iter_ts_sys)
                    tc_status = tc_result[0]
                    tc_duration = tc_result[1]
                    goto_tc = False

                except Exception:
                    print_error('unexpected error {0}'.format(
                                                    traceback.format_exc()))
                    tc_status, tc_duration = False, False
                    tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
                                                                    testcase)

            else:
                print_info('skipped testcase %s ' % tc_name)
                skipped += 1
                testsuite_utils.pSuite_testcase_skip(junit_resultfile)
                testsuite_utils.pSuite_update_suite_attributes(
                                junit_resultfile, str(errors), str(skipped),
                                str(tests), str(failures), time='0')
                data_repository['wt_junit_object'].update_count(
                                "skipped", "1", "ts",
                                data_repository['wt_ts_timestamp'])
                data_repository['wt_junit_object'].update_count(
                                "tests", "1", "ts",
                                data_repository['wt_ts_timestamp'])
                data_repository['wt_junit_object'].update_count(
                                "tests", "1", "pj", "not applicable")
                tmp_timestamp = str(Utils.datetime_utils.get_current_timestamp())
                time.sleep(2)
                data_repository['wt_junit_object'].create_testcase(
                                location="from testsuite",
                                timestamp=tmp_timestamp,
                                ts_timestamp=data_repository['wt_ts_timestamp'],
                                classname=data_repository['wt_suite_name'],
                                name=os.path.splitext(tc_name)[0])
                data_repository['wt_junit_object'].add_testcase_message(
                                                    tmp_timestamp, "skipped")
                data_repository['wt_junit_object'].update_attr(
                                "status", "SKIPPED", "tc", tmp_timestamp)
                data_repository['testcase_%d_result' % tests] = "SKIP"
                title = Utils.xml_Utils.getChildTextbyParentTag(
                                        tc_path, 'Details', 'Title')
                title = title.strip() if title else "None"
                data_repository['wt_junit_object'].update_attr(
                                "title", title, "tc", tmp_timestamp)
                data_repository['wt_junit_object'].update_attr(
                                "impact", impact_dict.get(tc_impact.upper()),
                                "tc", tmp_timestamp)
                data_repository['wt_junit_object'].update_attr(
                                "onerror", "N/A", "tc", tmp_timestamp)
                data_repository['wt_junit_object'].output_junit(
                                data_repository['wt_results_execdir'],
                                print_summary=False)
                continue

        else:
            errors += 1
            msg = print_error("Test case does not exist in the provided path: "
                              "{0}".format(tc_path))
            testsuite_utils.pSuite_testcase(junit_resultfile, suite_name,
                                            tc_path, time='0')
            testsuite_utils.pSuite_testcase_error(junit_resultfile, msg, '0')
            tc_status = "ERROR"
            if goto_tc and goto_tc == str(tests):
                goto_tc = False
            elif goto_tc and goto_tc != str(tests):
                data_repository['testcase_%d_result' % tests] = "ERROR"
                continue

        goto_tc_num = onerror_driver.main(testcase, suite_error_action,
                                          suite_error_value)
        if goto_tc_num is False:
            onerror = "Next"
        elif goto_tc_num == "ABORT":
            onerror = "Abort"
        else:
            onerror = "Goto:" + str(goto_tc_num)
        data_repository['wt_junit_object'].update_attr(
                        "impact", impact_dict.get(tc_impact.upper()), "tc",
                        data_repository['wt_tc_timestamp'])
        data_repository['wt_junit_object'].update_attr(
                        "onerror", onerror, "tc",
                        data_repository['wt_tc_timestamp'])

        tc_status_list.append(tc_status)
        tc_duration_list.append(tc_duration)

        string_status = {"TRUE": "PASS", "FALSE": "FAIL", "ERROR": "ERROR",
                         "SKIP": "SKIP"}

        if str(tc_status).upper() in string_status:
            data_repository['testcase_%d_result' % tests] = string_status[
                                                    str(tc_status).upper()]
        else:
            print_error("unexpected testcase status, default to exception")
            data_repository['testcase_%d_result' % tests] = "ERROR"

        tc_impact_list.append(tc_impact)
        if tc_impact.upper() == 'IMPACT':
            msg = "Status of the executed test case impacts Testsuite result"
        elif tc_impact.upper() == 'NOIMPACT':
            msg = ("Status of the executed test case does not impact "
                   "Testsuite result")
        print_debug(msg)

        runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(
                                                                testcase)
        retry_type, retry_cond, retry_cond_value, retry_value, \
            retry_interval = common_execution_utils.get_retry_from_xmlfile(testcase)
        if runmode is not None:
            if tc_status is True:
                testsuite_utils.update_tc_duration(str(tc_duration))
                # if runmode is 'rup' & tc_status is True, skip the repeated
                # execution of same testcase and move to next actual testcase
                if runmode == "rup":
                    goto_tc = str(value)
            elif tc_status == 'ERROR' or tc_status == 'EXCEPTION':
                errors += 1
                testsuite_utils.pSuite_testcase_error(
                            junit_resultfile,
                            'Encountered error/exception during TC execution',
                            str(tc_duration))
                goto_tc = onerror_driver.main(testcase, suite_error_action,
                                              suite_error_value)
                if goto_tc in ['ABORT', 'ABORT_AS_ERROR']:
                    update_suite_attribs(junit_resultfile, str(errors),
                                         str(skipped), str(tests),
                                         str(failures), time='0')
                    break
                # when 'onError:goto' value is less than the current tc num,
                # change the next iteration point to goto value
                elif goto_tc and int(goto_tc) < tests:
                    tests = int(goto_tc)-1
                    goto_tc = False
            elif tc_status is False:
                failures += 1
                testsuite_utils.pSuite_testcase_failure(junit_resultfile,
                                                        time=str(tc_duration))
                goto_tc = onerror_driver.main(testcase, suite_error_action,
                                              suite_error_value)
                if goto_tc in ['ABORT', 'ABORT_AS_ERROR']:
                    update_suite_attribs(junit_resultfile, str(errors),
                                         str(skipped), str(tests),
                                         str(failures), time='0')
                    break
                # when 'onError:goto' value is less than the current tc num,
                # change the next iteration point to goto value
                elif goto_tc and int(goto_tc) < tests:
                    tests = int(goto_tc)-1
                    goto_tc = False
                # if runmode is 'ruf' & tc_status is False, skip the repeated
                # execution of same testcase and move to next actual testcase
                if not goto_tc and runmode == "ruf":
                    goto_tc = str(value)
        elif retry_type is not None:
            if retry_type.upper() == 'IF':
                try:
                    if data_repository[retry_cond] == retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(
                                                        retry_interval))
                        pNote("The given condition '{0}' matches the expected "
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning("The condition value '{0}' does not "
                                      "match with the expected value "
                                      "'{1}'".format(
                                        data_repository[retry_cond],
                                        retry_cond_value))
                except KeyError:
                    print_warning("The given condition '{0}' is not there in "
                                  "the data repository".format(
                                                    retry_cond_value))
                    condition_met = False
                if condition_met is False:
                    goto_tc = str(retry_value)
            else:
                if retry_type.upper() == 'IF NOT':
                    try:
                        if data_repository[retry_cond] != retry_cond_value:
                            condition_met = True
                            pNote("Wait for {0}sec before retrying".format(
                                                            retry_interval))
                            pNote("The condition value '{0}' does not match "
                                  "with the expected value "
                                  "'{1}'".format(data_repository[retry_cond],
                                                 retry_cond_value))
                            time.sleep(int(retry_interval))
                        else:
                            condition_met = False
                            print_warning("The given condition '{0}' matches "
                                          "the expected value "
                                          "'{1}'".format(
                                                data_repository[retry_cond],
                                                retry_cond_value))
                    except KeyError:
                        condition_met = False
                        print_warning("The given condition '{0}' is not there "
                                      "in the data repository".format(
                                                            retry_cond_value))
                    if condition_met is False:
                        pNote("The given condition '{0}' matched with the "
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        goto_tc = str(retry_value)
        # suite_status = testsuite_utils.compute_testsuite_status(
        #     suite_status, tc_status, tc_impact)
        update_suite_attribs(junit_resultfile, str(errors),
                             str(skipped), str(tests), str(failures),
                             time='0')
        # junit_object/python_process is different for all the cases
        # executed in parallel
        if ts_iter is False:
            tc_junit_list.append(data_repository['wt_junit_object'])

    # junit_object/python_process is same for all the cases executed in the
    # same system for 'iterative_parallel' suite execution
    if ts_iter is True:
        tc_junit_list = data_repository['wt_junit_object']

    suite_status = Utils.testcase_Utils.compute_status_using_impact(
                                        tc_status_list, tc_impact_list)

    if tc_parallel:
        tc_impact = data_repository['wt_tc_impact']
        if tc_impact.upper() == 'IMPACT':
            msg = "Status of the executed test case impacts Testsuite result"
        elif tc_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test case does not impact Teststuie result"
        print_debug(msg)
        tc_name = Utils.file_Utils.getFileName(tc_path)
        # put result into multiprocessing queue and later retrieve in
        # corresponding driver
        queue.put((tc_status_list, tc_name, tc_impact_list, tc_duration_list,
                   tc_junit_list))
    return suite_status
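
compute_status_using_impact reduces the per-case statuses above to a single suite verdict, skipping cases marked 'noimpact'. A hedged sketch of those semantics (the real implementation lives in Warrior's testcase_Utils; this only illustrates the reduction relied on throughout this module):

def compute_status_using_impact(status_list, impact_list):
    """Combine statuses, ignoring entries whose impact is NOIMPACT."""
    status = True
    for case_status, impact in zip(status_list, impact_list):
        if str(impact).upper() == "NOIMPACT":
            continue  # a no-impact case never changes the verdict
        if str(case_status).upper() in ("ERROR", "EXCEPTION"):
            return "ERROR"
        status = status and bool(case_status)
    return status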
def execute_project(project_filepath, auto_defects, jiraproj, res_startdir,
                    logs_startdir, data_repository):
    """
    - Takes a list of testsuite locations input.
    - Iterates over the list and sends each testsuite
    location to testsuite_driver for execution.
    - Gets the status of the testsuite from the
    Warrior and computes the project_status based on the impact value
    of the testsuite.
    - If the testsuite fails, handles the failure using
    the default or specific  onError action,value.
    - Finally reports the project status to the result file.

    Arguments:
    1. testsuite_list        = (list) list of testsuite locations
    2. testsuite_driver      = (module loader) module loader of the testsuite_driver
    3. project_repository    = (dict) dictionary containing all data of the project under execution
    """
    project_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Project execution starts".format(project_start_time))
    suite_cntr = 0
    # project_status = True
    goto_testsuite = False
    ts_status_list = []
    ts_impact_list = []
    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    project_dir = os.path.dirname(project_filepath)
    project_title = Utils.xml_Utils.getChildTextbyParentTag(
        project_filepath, 'Details', 'Title')
    project_repository = get_project_details(project_filepath, res_startdir,
                                             logs_startdir, data_repository)
    project_repository['project_title'] = project_title
    testsuite_list = get_testsuite_list(project_filepath)

    # project_resultfile = project_repository['project_resultfile']

    project_name = project_repository['project_name']
    wp_results_execdir = project_repository['wp_results_execdir']
    data_repository['wp_results_execdir'] = wp_results_execdir
    wp_logs_execdir = project_repository['wp_logs_execdir']

    project_error_action = project_repository['def_on_error_action']
    project_error_value = project_repository['def_on_error_value']

    pj_junit_object = junit_class.Junit(filename=project_name,
                                        timestamp=project_start_time,
                                        name=project_name,
                                        display="True")

    pj_junit_object.update_attr("resultsdir",
                                project_repository['project_execution_dir'],
                                "pj", project_start_time)
    pj_junit_object.update_attr("title", project_repository['project_title'],
                                "pj", project_start_time)
    pj_junit_object.add_property("resultsdir",
                                 project_repository['project_execution_dir'],
                                 "pj", project_start_time)

    # adding the resultsdir as attribute, need to be removed after making it
    # a property
    pj_junit_object.add_project_location(project_filepath)
    if "jobid" in data_repository:
        pj_junit_object.add_jobid(data_repository["jobid"])
        del data_repository["jobid"]
    data_repository['wt_junit_object'] = pj_junit_object

    while suite_cntr < len(testsuite_list):
        testsuite = testsuite_list[suite_cntr]
        # suite_junit_type = 'file'
        suite_cntr += 1

        testsuite_rel_path = testsuite_utils.get_path_from_xmlfile(testsuite)
        if testsuite_rel_path is not None:
            testsuite_path = Utils.file_Utils.getAbsPath(
                testsuite_rel_path, project_dir)
        else:
            testsuite_path = str(testsuite_rel_path)
        print_info("\n")
        print_debug("<<<< Starting execution of Test suite: {0}>>>>".format(
            testsuite_path))
        action, testsuite_status = exec_type_driver.main(testsuite)
        testsuite_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
            testsuite)
        testsuite_name = Utils.file_Utils.getFileName(testsuite_path)
        testsuite_nameonly = Utils.file_Utils.getNameOnly(testsuite_name)
        ts_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
            testsuite, 'onError', 'action')
        ts_onError_action = ts_onError_action if ts_onError_action else project_error_action
        if Utils.file_Utils.fileExists(testsuite_path):
            if not goto_testsuite and action is True:

                testsuite_result = testsuite_driver.main(
                    testsuite_path,
                    data_repository=data_repository,
                    from_project=True,
                    auto_defects=auto_defects,
                    jiraproj=jiraproj,
                    res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                testsuite_status = testsuite_result[0]
                # testsuite_resultfile = testsuite_result[1]

            elif goto_testsuite and goto_testsuite == str(suite_cntr)\
                    and action is True:
                testsuite_result = testsuite_driver.main(
                    testsuite_path,
                    data_repository=data_repository,
                    from_project=True,
                    auto_defects=auto_defects,
                    jiraproj=jiraproj,
                    res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                goto_testsuite = False
                testsuite_status = testsuite_result[0]
                # testsuite_resultfile = testsuite_result[1]

            else:
                msg = print_info(
                    'skipped testsuite: {0} '.format(testsuite_path))
                testsuite_resultfile = '<testsuite errors="0" failures="0" name="{0}" '\
                'skipped="0" tests="0" time="0" timestamp="{1}" > '\
                '<skipped message="{2}"/> </testsuite>'.format(testsuite_name,
                                                               project_start_time,
                                                               msg)
                tmp_timestamp = str(
                    Utils.datetime_utils.get_current_timestamp())
                time.sleep(2)
                pj_junit_object.create_testsuite(
                    location=os.path.dirname(testsuite_path),
                    name=testsuite_nameonly,
                    timestamp=tmp_timestamp,
                    **pj_junit_object.init_arg())
                pj_junit_object.update_attr("status", "SKIPPED", "ts",
                                            tmp_timestamp)
                pj_junit_object.update_attr("skipped", "1", "pj",
                                            tmp_timestamp)
                pj_junit_object.update_count("suites", "1", "pj",
                                             tmp_timestamp)
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "SKIP"
                # pj_junit_object.add_testcase_message(tmp_timestamp, "skipped")
                pj_junit_object.update_attr(
                    "impact", impact_dict.get(testsuite_impact.upper()), "ts",
                    tmp_timestamp)
                pj_junit_object.update_attr("onerror", "N/A", "ts",
                                            tmp_timestamp)
                pj_junit_object.output_junit(wp_results_execdir,
                                             print_summary=False)
                continue

        else:

            msg = print_error("Test suite does not exist in "
                              "provided path: {0}".format(testsuite_path))
            testsuite_status = 'ERROR'
            testsuite_resultfile = '<testsuite errors="0" failures="0" name="{0}" '\
            'skipped="0" tests="0" time="0" timestamp="{1}" > '\
            '<error message="{2}"/> </testsuite>'.format(testsuite_name, project_start_time, msg)
            # suite_junit_type = 'string'
            if goto_testsuite and goto_testsuite == str(suite_cntr):
                goto_testsuite = False
            elif goto_testsuite and goto_testsuite != str(suite_cntr):
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "ERROR"
                continue

        goto_testsuite_num = onerror_driver.main(testsuite,
                                                 project_error_action,
                                                 project_error_value)
        if goto_testsuite_num is False:
            onerror = "Next"
        elif goto_testsuite_num == "ABORT":
            onerror = "Abort"
        else:
            onerror = "Goto:" + str(goto_testsuite_num)
        pj_junit_object.update_attr("impact",
                                    impact_dict.get(testsuite_impact.upper()),
                                    "ts", data_repository['wt_ts_timestamp'])
        pj_junit_object.update_attr("onerror", onerror, "ts",
                                    data_repository['wt_ts_timestamp'])

        string_status = {
            "TRUE": "PASS",
            "FALSE": "FAIL",
            "ERROR": "ERROR",
            "SKIP": "SKIP"
        }

        if str(testsuite_status).upper() in string_status:
            data_repository['testsuite_{}_result'.format(suite_cntr)] = \
                string_status[str(testsuite_status).upper()]
        else:
            print_error("unexpected testsuite status, default to exception")
            data_repository['testsuite_{}_result'.format(suite_cntr)] = "ERROR"

        ts_status_list.append(testsuite_status)
        ts_impact_list.append(testsuite_impact)
        if testsuite_impact.upper() == 'IMPACT':
            msg = "Status of the executed test suite impacts Project result"
        elif testsuite_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test suite does not impact project result"
        print_debug(msg)
        # project_status = compute_project_status(project_status, testsuite_status,
        #                                                 testsuite_impact)
        runmode, value = common_execution_utils.get_runmode_from_xmlfile(
            testsuite)
        retry_type, retry_cond, retry_cond_value, retry_value,\
            retry_interval = common_execution_utils.get_retry_from_xmlfile(testsuite)
        if runmode is not None:
            if testsuite.find("runmode") is not None and\
              testsuite.find("runmode").get("attempt") is not None:
                print_info("runmode attempt: {0}".format(
                    testsuite.find("runmode").get("attempt")))
            # if runmode is 'ruf' & testsuite_status is False, skip the repeated execution of same
            # test suite and move to next actual test suite
            if not project_error_value and runmode == "RUF" and\
                    testsuite_status is False:
                goto_testsuite = str(value)
            # if runmode is 'rup' & testsuite_status is True, skip the repeated
            # execution of same testsuite and move to next actual testsuite
            elif runmode == "RUP" and testsuite_status is True:
                goto_testsuite = str(value)
        elif retry_type is not None:
            if testsuite.find("retry") is not None and\
              testsuite.find("retry").get("attempt") is not None:
                print_info("retry attempt: {0}".format(
                    testsuite.find("retry").get("attempt")))
            if retry_type.upper() == 'IF':
                try:
                    if data_repository[retry_cond] == retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(
                            retry_interval))
                        pNote("The given condition '{0}' matches the expected"
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning(
                            "The condition value '{0}' does not match with the expected "
                            "value '{1}'".format(data_repository[retry_cond],
                                                 retry_cond_value))
                except KeyError:
                    print_warning(
                        "The given condition '{0}' does not exist in "
                        "the data repository".format(retry_cond))

                    condition_met = False
                if condition_met is False:
                    goto_testsuite = str(retry_value)
            else:
                if retry_type.upper() == 'IF NOT':
                    try:
                        if data_repository[retry_cond] != retry_cond_value:
                            condition_met = True
                            pNote("Wait for {0}sec before "
                                  "retrying".format(retry_interval))
                            pNote("The condition value '{0}' does not match "
                                  "with the expected value '{1}'".format(
                                      data_repository[retry_cond],
                                      retry_cond_value))
                            time.sleep(int(retry_interval))
                        else:
                            condition_met = False
                    except KeyError:
                        condition_met = False
                        print_warning(
                            "The given condition '{0}' is not there "
                            "in the data repository".format(retry_cond))
                    if condition_met is False:
                        pNote("The given condition '{0}' matched with the "
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        goto_testsuite = str(retry_value)
        else:
            if testsuite_status is False or testsuite_status == "ERROR" or\
                    testsuite_status == "EXCEPTION":
                goto_testsuite = onerror_driver.main(testsuite,
                                                     project_error_action,
                                                     project_error_value)
            if goto_testsuite in ['ABORT', 'ABORT_AS_ERROR']:
                break
            # when 'onError:goto' value is less than the current ts num,
            # change the next iteration point to goto value
            elif goto_testsuite and int(goto_testsuite) < suite_cntr:
                suite_cntr = int(goto_testsuite) - 1
                goto_testsuite = False

    project_status = Utils.testcase_Utils.compute_status_using_impact(
        ts_status_list, ts_impact_list)
    print_info("\n")
    project_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Project execution completed".format(project_end_time))
    project_duration = Utils.datetime_utils.get_time_delta(project_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(project_duration)
    print_info("Project duration= {0}".format(hms))

    project_status = report_project_result(project_status, project_repository)
    pj_junit_object.update_attr("status", str(project_status), "pj",
                                project_start_time)
    pj_junit_object.update_attr("time", str(project_duration), "pj",
                                project_start_time)

    pj_junit_object.output_junit(wp_results_execdir)

    # Save JUnit/HTML results of the Project in MongoDB server
    if data_repository.get("db_obj") is not False:
        pj_junit_xml = project_repository['wp_results_execdir'] +\
            os.sep + pj_junit_object.filename + "_junit.xml"
        data_repository.get("db_obj").add_html_result_to_mongodb(pj_junit_xml)

    return project_status, project_repository
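
The suite and project drivers repeat the same retry decision: read a condition key from the data repository and retry while ('if') it matches the expected value, or while ('if not') it does not. A condensed sketch of that shared check (an illustrative refactoring, not an existing Warrior helper):

def retry_condition_met(retry_type, data_repository, retry_cond,
                        retry_cond_value):
    """Return True when the retry condition holds for the given retry type."""
    try:
        actual = data_repository[retry_cond]
    except KeyError:
        # a condition key missing from the repository never matches
        return False
    if retry_type.upper() == "IF":
        return actual == retry_cond_value
    if retry_type.upper() == "IF NOT":
        return actual != retry_cond_value
    return False

When this check fails, the drivers above point the goto target at retry_value so the same entry is executed again.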
Example No. 24
0
def execute_parallel_testsuites(testsuite_list,
                                project_repository,
                                data_repository,
                                auto_defects=False,
                                ts_parallel=True):
    """Takes a list of suites as input and executes them in parallel by
    creating separate process of testsuite_driver for each of these suite """

    jobs_list = []
    output_q = None
    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    project_error_action = project_repository['def_on_error_action']
    project_filepath = project_repository['project_filepath']
    project_dir = os.path.dirname(project_filepath)
    wp_results_execdir = project_repository['wp_results_execdir']
    wp_logs_execdir = project_repository['wp_logs_execdir']
    jiraproj = data_repository["jiraproj"]

    for testsuite in testsuite_list:
        target_module = testsuite_driver.main
        testsuite_rel_path = testsuite_utils.get_path_from_xmlfile(testsuite)
        if testsuite_rel_path is not None:
            testsuite_path = Utils.file_Utils.getAbsPath(
                testsuite_rel_path, project_dir)
        else:
            testsuite_path = str(testsuite_rel_path)
        testsuite_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
            testsuite)
        data_repository['wt_ts_impact'] = testsuite_impact
        ts_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
            testsuite, 'onError', 'action')
        ts_onError_action = ts_onError_action if ts_onError_action else project_error_action

        tc_args_dict = OrderedDict([("testsuite_filepath", testsuite_path),
                                    ("data_repository", data_repository),
                                    ("from_project", True),
                                    ("auto_defects", auto_defects),
                                    ("jiraproj", jiraproj),
                                    ("res_startdir", wp_results_execdir),
                                    ("logs_startdir", wp_logs_execdir),
                                    ("ts_onError_action", ts_onError_action),
                                    ("output_q", output_q),
                                    ("ts_parallel", ts_parallel)])

        process, jobs_list, output_q = create_and_start_process_with_queue(
            target_module, tc_args_dict, jobs_list, output_q)

        print_debug("process: {0}".format(process))

    for job in jobs_list:
        job.join()

    result_list = get_results_from_queue(output_q)

    ts_status_list = []
    ts_impact_list = []
    ts_timestamp_list = []
    # Get the junit object of each suite, extract the information from it
    # and combine with project junit object
    ts_junit_list = []

    for result in result_list:
        ts_status_list.append(result[0])
        ts_impact_list.append(result[1])
        ts_timestamp_list.append(result[2])
        ts_junit_list.append(result[3])

    for i in range(len(ts_junit_list)):
        ts_junit_list[i].update_attr(
            "impact", impact_dict.get(ts_impact_list[i].upper()), "ts",
            ts_timestamp_list[i])
        # onerror is not applicable for parallel execution
        ts_junit_list[i].update_attr("onerror", "N/A", "ts",
                                     ts_timestamp_list[i])

    # parallel suites generate multiple suite junit result files;
    # each file logs the result for one suite and is not integrated.
    # update project junit result file with individual suite result files
    data_repository['wt_junit_object'] = update_pj_junit_resultfile(
        data_repository['wt_junit_object'], ts_junit_list)

    project_status = Utils.testcase_Utils.compute_status_using_impact(
        ts_status_list, ts_impact_list)
    return project_status
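
Each parallel suite hands back its own junit object, so the project-level result file has to absorb them afterwards. A rough sketch of an update_pj_junit_resultfile-style merge, assuming each junit object exposes an ElementTree root whose children include testsuite elements, as the kafka block earlier does (an assumption for illustration; the real helper ships with Warrior):

def update_pj_junit_resultfile(pj_junit_object, ts_junit_list):
    """Graft every suite's <testsuite> element onto the project junit root."""
    for ts_junit in ts_junit_list:
        for testsuite in ts_junit.root.findall("testsuite"):
            pj_junit_object.root.append(testsuite)
    return pj_junit_object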
def execute_testcase(testcase_filepath, data_repository, tc_context, runtype,
                     tc_parallel, queue, auto_defects, suite, jiraproj,
                     tc_onError_action, iter_ts_sys):
    """ Executes the testcase (provided as a xml file)
            - Takes a testcase xml file as input and executes each command in the testcase.
            - Computes the testcase status based on the stepstatus and the impact value of the step
            - Handles step failures as per the default/specific onError action/value
            - Calls the function to report the testcase status

    :Arguments:
        1. testcase_filepath (string) = the full path of the testcase xml file
        2. data_repository (dict) = Warrior data repository
        3. tc_context (string) = positive/negative context of the testcase
        4. runtype (string) = sequential/parallel keyword execution
        5. tc_parallel (boolean) = True for parallel execution else False
        6. queue = multiprocessing queue for parallel execution
        7. auto_defects (boolean) = True for Jira auto defect creation else False
        8. suite (string) = name of the parent suite, if executed from a suite
        9. jiraproj (string) = name of the jira project
        10. tc_onError_action (string) = onError action for the testcase
        11. iter_ts_sys (string) = system for iterative suite execution
    """

    tc_status = True
    tc_start_time = Utils.datetime_utils.get_current_timestamp()
    tc_timestamp = str(tc_start_time)
    print_info("[{0}] Testcase execution starts".format(tc_start_time))

    get_testcase_details(testcase_filepath, data_repository, jiraproj)

    # These lines are for creating testcase junit file
    from_ts = False
    if 'wt_junit_object' not in data_repository:
        # not from testsuite
        tc_junit_object = junit_class.Junit(
            filename=data_repository['wt_name'],
            timestamp=tc_timestamp,
            name="customProject_independant_testcase_execution",
            display="False")
        if "jobid" in data_repository:
            tc_junit_object.add_jobid(data_repository["jobid"])
            del data_repository["jobid"]
        tc_junit_object.create_testcase(
            location=data_repository['wt_filedir'],
            timestamp=tc_timestamp,
            ts_timestamp=tc_timestamp,
            name=data_repository['wt_name'],
            testcasefile_path=data_repository['wt_testcase_filepath'],
            display="False")
        junit_requirements(testcase_filepath, tc_junit_object, tc_timestamp)
        data_repository['wt_ts_timestamp'] = tc_timestamp
    else:
        tc_junit_object = data_repository['wt_junit_object']
        tc_junit_object.create_testcase(
            location="from testsuite",
            timestamp=tc_timestamp,
            ts_timestamp=data_repository['wt_ts_timestamp'],
            classname=data_repository['wt_suite_name'],
            name=data_repository['wt_name'],
            testcasefile_path=data_repository['wt_testcase_filepath'])
        from_ts = True
        junit_requirements(testcase_filepath, tc_junit_object,
                           data_repository['wt_ts_timestamp'])
    data_repository['wt_tc_timestamp'] = tc_timestamp
    data_type = data_repository['wt_data_type']

    # Adding resultsdir, logsdir, title as attributes to testcase_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of properties tag in testcase
    tc_junit_object.add_property(
        "resultsdir", os.path.dirname(data_repository['wt_resultsdir']), "tc",
        tc_timestamp)
    tc_junit_object.add_property(
        "logsdir", os.path.dirname(data_repository['wt_logsdir']), "tc",
        tc_timestamp)
    tc_junit_object.update_attr("title", data_repository['wt_title'], "tc",
                                tc_timestamp)
    data_repository['wt_junit_object'] = tc_junit_object

    print_testcase_details_to_console(testcase_filepath, data_repository)
    step_list = get_steps_list(testcase_filepath)

    tc_state = Utils.xml_Utils.getChildTextbyParentTag(testcase_filepath,
                                                       'Details', 'State')
    if tc_state is not False and tc_state is not None and \
       tc_state.upper() == "DRAFT":
        print_warning("Testcase is in 'Draft' state, it may have keywords "
                      "that have not been developed yet. Skipping the "
                      "testcase execution and it will be marked as 'ERROR'")
        tc_status = "ERROR"
    else:
        if data_type.upper() == 'CUSTOM' and \
         runtype.upper() == 'SEQUENTIAL_KEYWORDS':
            tc_status = execute_custom(data_type, runtype,
                                       custom_sequential_kw_driver,
                                       data_repository, step_list)
        elif data_type.upper() == 'CUSTOM' and \
                runtype.upper() == 'PARALLEL_KEYWORDS':
            tc_status = execute_custom(data_type, runtype,
                                       custom_parallel_kw_driver,
                                       data_repository, step_list)
        elif data_type.upper() == 'ITERATIVE' and \
                runtype.upper() == 'SEQUENTIAL_KEYWORDS':
            print_info("iterative sequential")
            system_list = get_system_list(data_repository['wt_datafile'],
                                          iter_req=True) \
                if iter_ts_sys is None else [iter_ts_sys]
            if not system_list:
                print_warning("DataType is iterative but no systems found in "
                              "input datafile; when DataType is iterative the "
                              "InputDataFile should have system(s) to "
                              "iterate upon")
                tc_status = False
            else:
                tc_status = iterative_sequential_kw_driver.main(
                    step_list, data_repository, tc_status, system_list)
        elif data_type.upper() == 'ITERATIVE' and \
                runtype.upper() == 'PARALLEL_KEYWORDS':
            print_info("iterative parallel")
            system_list = get_system_list(data_repository['wt_datafile'],
                                          iter_req=True) \
                if iter_ts_sys is None else [iter_ts_sys]
            if not system_list:
                print_warning("DataType is iterative but no systems found in "
                              "input datafile; when DataType is iterative the "
                              "InputDataFile should have system(s) to "
                              "iterate upon")
                tc_status = False
            else:
                tc_status = iterative_parallel_kw_driver.main(
                    step_list, data_repository, tc_status, system_list)
        elif data_type.upper() == "HYBRID":
            print_info("Hybrid")
            system_list, system_node_list = get_system_list(
                data_repository['wt_datafile'], node_req=True)
            # call the hybrid driver here
            hyb_drv_obj = hybrid_driver_class.HybridDriver(
                step_list, data_repository, tc_status, system_list,
                system_node_list)
            tc_status = hyb_drv_obj.execute_hybrid_mode()
        else:
            print_warning("unsupported value provided for testcase data_type "
                          "or testsuite runtype")
            tc_status = False

    if tc_context.upper() == 'NEGATIVE':
        if tc_status not in ('EXCEPTION', 'ERROR'):
            print_debug(
                "Test case status is: '{0}', flip status as context is negative"
                .format(tc_status))
            tc_status = not tc_status

    if tc_status is False and tc_onError_action and \
            tc_onError_action.upper() == 'ABORT_AS_ERROR':
        print_info(
            "Testcase status will be marked as ERROR as onError action is set to 'abort_as_error'"
        )
        tc_status = "ERROR"

    check_and_create_defects(tc_status, auto_defects, data_repository,
                             tc_junit_object)

    print("\n")
    tc_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testcase execution completed".format(tc_end_time))
    tc_duration = Utils.datetime_utils.get_time_delta(tc_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(tc_duration)
    print_info("Testcase duration= {0}".format(hms))

    tc_junit_object.update_count(tc_status, "1", "ts",
                                 data_repository['wt_ts_timestamp'])
    tc_junit_object.update_count("tests", "1", "ts",
                                 data_repository['wt_ts_timestamp'])
    tc_junit_object.update_count("tests", "1", "pj", "not appicable")
    tc_junit_object.update_attr("status", str(tc_status), "tc", tc_timestamp)
    tc_junit_object.update_attr("time", str(tc_duration), "tc", tc_timestamp)
    tc_junit_object.add_testcase_message(tc_timestamp, tc_status)

    # Adding resultsdir, logsdir, title as attributes to testcase_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of properties tag in testcase
    tc_junit_object.update_attr(
        "resultsdir", os.path.dirname(data_repository['wt_resultsdir']), "tc",
        tc_timestamp)
    tc_junit_object.update_attr("logsdir",
                                os.path.dirname(data_repository['wt_logsdir']),
                                "tc", tc_timestamp)

    report_testcase_result(tc_status, data_repository)
    if not from_ts:
        tc_junit_object.update_count(tc_status, "1", "pj", "not appicable")
        tc_junit_object.update_count("suites", "1", "pj", "not appicable")
        tc_junit_object.update_attr("status", str(tc_status), "ts",
                                    data_repository['wt_ts_timestamp'])
        tc_junit_object.update_attr("status", str(tc_status), "pj",
                                    "not appicable")
        tc_junit_object.update_attr("time", str(tc_duration), "ts",
                                    data_repository['wt_ts_timestamp'])
        tc_junit_object.update_attr("time", str(tc_duration), "pj",
                                    "not appicable")

        tc_junit_object.output_junit(data_repository['wt_resultsdir'])

        # Save JUnit/HTML results of the Case in MongoDB server
        if data_repository.get("db_obj") is not False:
            tc_junit_xml = data_repository[
                'wt_resultsdir'] + os.sep + tc_junit_object.filename + "_junit.xml"
            data_repository.get("db_obj").add_html_result_to_mongodb(
                tc_junit_xml)
    else:
        # send an email on TC failure (no need to send an email here when
        # executing a single case)
        if str(tc_status).upper() in ["FALSE", "ERROR", "EXCEPTION"]:
            email_setting = None
            # for first TC failure
            if "any_failures" not in data_repository:
                email_params = email.get_email_params("first_failure")
                if all(value != "" for value in email_params[:3]):
                    email_setting = "first_failure"
                data_repository['any_failures'] = True
            # for further TC failures
            if email_setting is None:
                email_params = email.get_email_params("every_failure")
                if all(value != "" for value in email_params[:3]):
                    email_setting = "every_failure"

            if email_setting is not None:
                email.compose_send_email(
                    "Test Case: ", data_repository['wt_testcase_filepath'],
                    data_repository['wt_logsdir'],
                    data_repository['wt_resultsdir'], tc_status, email_setting)

        if 'wp_results_execdir' in data_repository:
            # Create and replace existing Project junit file for each case
            tc_junit_object.output_junit(data_repository['wp_results_execdir'],
                                         print_summary=False)
        else:
            # Create and replace existing Suite junit file for each case
            tc_junit_object.output_junit(data_repository['wt_results_execdir'],
                                         print_summary=False)

    if tc_parallel:
        tc_impact = data_repository['wt_tc_impact']
        if tc_impact.upper() == 'IMPACT':
            msg = "Status of the executed test case impacts Testsuite result"
        elif tc_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test case does not impact Teststuie result"
        print_debug(msg)
        tc_name = Utils.file_Utils.getFileName(testcase_filepath)
        # put result into multiprocessing queue and later retrieve in corresponding driver
        queue.put(
            (tc_status, tc_name, tc_impact, tc_duration, tc_junit_object))

    # Save XML results of the Case in MongoDB server
    if data_repository.get("db_obj") is not False:
        data_repository.get("db_obj").add_xml_result_to_mongodb(
            data_repository['wt_resultfile'])

    # the caller unpacks tc_status and data_repository from the return value
    return tc_status, data_repository
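
Two small rules drive the final status computed above: any status other than ERROR/EXCEPTION is inverted when the testcase context is NEGATIVE, and a False status is promoted to ERROR when the onError action is abort_as_error. A standalone sketch of those rules (resolve_tc_status is a hypothetical helper, not framework code):

def resolve_tc_status(tc_status, tc_context, tc_onerror_action):
    # Invert the status when the context is negative, unless the run
    # already ended in ERROR or EXCEPTION.
    if tc_context.upper() == 'NEGATIVE' and \
            tc_status not in ('ERROR', 'EXCEPTION'):
        tc_status = not tc_status
    # Promote a plain failure to ERROR when onError says abort_as_error.
    if tc_status is False and tc_onerror_action and \
            tc_onerror_action.upper() == 'ABORT_AS_ERROR':
        tc_status = "ERROR"
    return tc_status

assert resolve_tc_status(True, 'NEGATIVE', None) is False
assert resolve_tc_status(False, 'POSITIVE', 'abort_as_error') == "ERROR"
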
Example n. 26
def execute_step(step, step_num, data_repository, system_name, parallel,
                 queue):
    """ Executes a step from the testcase xml file
        - Parses a step from the testcase xml file
        - Get the values of Driver, Keyword, impactsTcResult
        - If the step has arguments, get all the arguments and store them as key/value pairs in args_repository
        - Sends the Keyword, data_repository, args_repository to the respective Driver.
        - Reports the status of the keyword executed (obtained as return value from the respective Driver)

    Arguments:
    1. step            = (xml element) xml element with tag <step> containing the details of the step to be executed (Driver, Keyword, Arguments, Impact etc.)
    2. step_num        = (int) step number being executed
    3. data_repository = (dict) data_repository of the testcase
    4. system_name     = (string) name of the system the step runs against (or None)
    5. parallel        = (bool) True if the step runs in parallel with other steps
    6. queue           = multiprocessing queue to report results when parallel is True

    tc_junit_object = data_repository['wt_junit_object']
    driver = step.get('Driver')
    keyword = step.get('Keyword')
    context = Utils.testcase_Utils.get_context_from_xmlfile(step)
    step_impact = Utils.testcase_Utils.get_impact_from_xmlfile(step)
    step_description = Utils.testcase_Utils.get_description_from_xmlfile(step)

    if parallel is True:
        step_console_log = get_step_console_log(
            data_repository['wt_filename'], data_repository['wt_logsdir'],
            'step-{0}_{1}_consoleLogs'.format(step_num, keyword))

    data_repository['step_num'] = step_num
    data_repository['wt_driver'] = driver
    data_repository['wt_keyword'] = keyword
    data_repository['wt_step_impact'] = step_impact
    data_repository['wt_step_context'] = context
    data_repository['wt_step_description'] = step_description

    kw_resultfile = get_keyword_resultfile(data_repository, system_name,
                                           step_num, keyword)
    Utils.config_Utils.set_resultfile(kw_resultfile)
    # print keyword to result file
    Utils.testcase_Utils.pKeyword(keyword, driver)
    print_info("step number: {0}".format(step_num))
    print_info("Teststep Description: {0}".format(step_description))

    if step.find("runmode") is not None and step.find("runmode").get(
            "attempt") is not None:
        print_info("keyword attempt: {0}".format(
            step.find("runmode").get("attempt")))
    if step.find("retry") is not None and step.find("retry").get(
            "attempt") is not None:
        print_info("keyword attempt: {0}".format(
            step.find("retry").get("attempt")))
    kw_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Keyword execution starts".format(kw_start_time))
    # get argument list provided by user
    args_repository = get_arguments(step)
    if system_name is not None:
        args_repository['system_name'] = system_name
    Utils.testcase_Utils.update_arguments(args_repository)
    Utils.testcase_Utils.update_kw_resultfile(kw_resultfile)

    exec_type_onerror = False
    action, keyword_status = exec_type_driver.main(step)

    if action is True:
        send_keyword_to_productdriver(driver, keyword, data_repository,
                                      args_repository)
        keyword_status = data_repository['step-%s_status' % step_num]
        Utils.testcase_Utils.update_step_num(str(step_num))
        if context.upper() == 'NEGATIVE' and isinstance(keyword_status, bool):
            print_debug(
                "Keyword status = {0}, Flip status as context is Negative".
                format(keyword_status))
            keyword_status = not keyword_status
    elif action == 'SKIP':
        print_debug("Action is {0}".format(action))

    elif action is False:
        exec_type_onerror = True
        print_debug("Action is {0}".format(action))

    print("\n")
    print_info("*** Keyword status ***")
    step_goto_value = False
    step_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
        step, 'onError', 'action')
    if step_onError_action is not False:
        if step_onError_action.upper() == 'GOTO':
            step_goto_value = Utils.xml_Utils.get_attributevalue_from_directchildnode(
                step, 'onError', 'value')
    testcase_error_action = data_repository['wt_def_on_error_action']
    step_onError_action = step_onError_action if step_onError_action else testcase_error_action
    if step_onError_action.upper() == "GOTO" and step_goto_value == False:
        step_goto_value = data_repository['wt_def_on_error_value']
    onerror = step_onError_action.upper()
    if step_goto_value is not False and step_goto_value is not None:
        onerror = onerror + " step " + step_goto_value
    if keyword_status is False and step_onError_action and \
            step_onError_action.upper() == 'ABORT_AS_ERROR':
        print_info(
            "Keyword status will be marked as ERROR as onError action is set to 'abort_as_error'"
        )
        keyword_status = "ERROR"
    Utils.testcase_Utils.reportKeywordStatus(keyword_status, keyword)
    print_info("step number: {0}".format(step_num))

    string_status = {
        "TRUE": "PASS",
        "FALSE": "FAIL",
        "ERROR": "ERROR",
        "EXCEPTION": "EXCEPTION",
        "SKIP": "SKIP"
    }

    if str(keyword_status).upper() in string_status:
        data_repository['step_%s_result' %
                        step_num] = string_status[str(keyword_status).upper()]
    else:
        print_error("unexpected step status, default to exception")
        data_repository['step_%s_result' % step_num] = "EXCEPTION"

    if step_impact.upper() == 'IMPACT':
        msg = "Status of the executed step impacts TC result"
        if str(keyword_status).upper() == 'SKIP':
            keyword_status = None
    elif step_impact.upper() == 'NOIMPACT':
        msg = "Status of the executed step does not impact TC result"
    Utils.testcase_Utils.pNote_level(msg, "debug", "kw")
    if 'step-%s_exception' % step_num in data_repository:
        msg = "Exception message: " + \
            data_repository['step-%s_exception' % step_num]
        Utils.testcase_Utils.pNote_level(msg, "debug", "kw", ptc=False)
    print("\n")
    kw_end_time = Utils.datetime_utils.get_current_timestamp()
    kw_duration = Utils.datetime_utils.get_time_delta(kw_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(kw_duration)
    print_info("Keyword duration= {0}".format(hms))
    print_info("[{0}] Keyword execution completed".format(kw_end_time))

    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    tc_junit_object.add_keyword_result(data_repository['wt_tc_timestamp'],
                                       step_num, keyword, str(keyword_status),
                                       kw_start_time, kw_duration,
                                       kw_resultfile,
                                       impact_dict.get(step_impact.upper()),
                                       onerror)
    tc_junit_object.update_count(str(keyword_status), "1", "tc",
                                 data_repository['wt_tc_timestamp'])
    tc_junit_object.update_count("keywords", "1", "tc",
                                 data_repository['wt_tc_timestamp'])

    if parallel is True:
        # put result into multiprocessing queue and later retrieve in
        # corresponding driver
        queue.put((keyword_status, kw_resultfile, step_impact.upper(),
                   tc_junit_object))
    else:
        return keyword_status, kw_resultfile, step_impact, exec_type_onerror
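
The onError handling above reduces to a simple precedence: a step-level onError action overrides the testcase default, and a GOTO without an explicit target falls back to the default target. A condensed sketch of that precedence (resolve_onerror is a hypothetical helper, not framework code):

def resolve_onerror(step_action, step_value, default_action, default_value):
    # The step-level action wins over the testcase default.
    action = step_action if step_action else default_action
    value = step_value
    # A GOTO without an explicit target falls back to the default target.
    if action.upper() == "GOTO" and not value:
        value = default_value
    label = action.upper()
    if value:
        label = label + " step " + str(value)
    return label

assert resolve_onerror(False, False, "next", None) == "NEXT"
assert resolve_onerror("goto", False, "next", "5") == "GOTO step 5"
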
Example n. 27
def execute_testsuite(testsuite_filepath, data_repository, from_project,
                      auto_defects, jiraproj, res_startdir, logs_startdir,
                      ts_onError_action, queue, ts_parallel):
    """Executes the testsuite (provided as a xml file)
            - Takes a testsuite xml file as input and
            sends each testcase to Basedriver for execution.
            - Computes the testsuite status based on the
            testcase_status and the impact value of the testcase
            - Handles testcase failures as per the default/specific onError action/value
            - Calls the function to report the testsuite status

    Arguments:
    1. testsuite_filepath = (string) the full path of the testsuite xml file
    2. data_repository    = (dict) data repository of the execution
    3. from_project       = (bool) True if the testsuite is executed as part of a project
    4. auto_defects       = (bool) True to automatically raise defects on failure
    5. jiraproj           = name of the jira project
    6. res_startdir       = directory under which the suite results directory is created
    7. logs_startdir      = directory under which the suite logs directory is created
    8. ts_onError_action  = onError action for the testsuite
    9. queue              = multiprocessing queue to report results when ts_parallel is True
    10. ts_parallel       = (bool) True if the testsuite runs in parallel with other suites
    """
    testsuite_status_list = []
    suite_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution starts".format(suite_start_time))
    initialize_suite_fields(data_repository)
    suite_repository = get_suite_details(testsuite_filepath, data_repository,
                                         from_project, res_startdir,
                                         logs_startdir)
    testcase_list = common_execution_utils.get_step_list(
        testsuite_filepath, "Testcases", "Testcase")
    execution_type = suite_repository['suite_exectype'].upper()
    no_of_tests = str(len(testcase_list))

    junit_resultfile = suite_repository['junit_resultfile']
    suite_name = suite_repository['suite_name']
    suite_execution_dir = suite_repository['suite_execution_dir']

    data_repository['wt_suite_execution_dir'] = suite_execution_dir
    data_repository['wt_results_execdir'] = suite_repository[
        'ws_results_execdir']
    data_repository['wt_logs_execdir'] = suite_repository['ws_logs_execdir']
    data_repository['wt_suite_name'] = suite_name

    suite_timestamp = testsuite_utils.get_suite_timestamp()
    data_repository['wt_ts_timestamp'] = suite_timestamp
    suite_repository['wt_ts_timestamp'] = suite_timestamp

    data_repository["suite_exectype"] = "iterative" if execution_type == "ITERATIVE_SEQUENTIAL" or \
    execution_type == "ITERATIVE_PARALLEL" else execution_type

    ts_junit_display = "True"
    pj_junit_display = "False"
    if "wt_junit_object" in data_repository:
        ts_junit_object = data_repository["wt_junit_object"]

    else:
        ts_junit_object = junit_class.Junit(
            filename=suite_name,
            timestamp=suite_timestamp,
            name="customProject_independant_testcase_execution",
            display=pj_junit_display)

        if "jobid" in data_repository:
            ts_junit_object.add_jobid(data_repository["jobid"])
            del data_repository["jobid"]
        data_repository["wt_junit_object"] = ts_junit_object
    suite_repository["wt_junit_object"] = ts_junit_object
    ts_junit_object.create_testsuite(
        location=os.path.dirname(testsuite_filepath),
        name=suite_name,
        timestamp=suite_timestamp,
        suite_location=suite_repository['testsuite_filepath'],
        title=suite_repository['suite_title'],
        display=ts_junit_display,
        **ts_junit_object.init_arg())

    # Adding resultsdir as attributes to testsuite_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of properties tag in testcase
    ts_junit_object.update_attr("resultsdir",
                                suite_repository['suite_execution_dir'], "ts",
                                suite_timestamp)
    ts_junit_object.add_property("resultsdir",
                                 suite_repository['suite_execution_dir'], "ts",
                                 suite_timestamp)

    if suite_repository.has_key("data_file"):
        data_repository['suite_data_file'] = suite_repository['data_file']

    # jiraproj name
    data_repository['jiraproj'] = jiraproj

    testsuite_utils.pSuite_root(junit_resultfile)

    testsuite_utils.pSuite_testsuite(junit_resultfile,
                                     suite_name,
                                     errors='0',
                                     skipped='0',
                                     tests=no_of_tests,
                                     failures='0',
                                     time='0',
                                     timestamp=suite_timestamp)
    testsuite_utils.pSuite_property(junit_resultfile, 'title',
                                    suite_repository['suite_title'])
    testsuite_utils.pSuite_property(junit_resultfile, 'location',
                                    testsuite_filepath)
    if "jobid" in data_repository:
        testsuite_utils.pSuite_property(junit_resultfile, 'resultlocation',
                                        data_repository["jobid"])
        # del data_repository["jobid"]

    print_suite_details_to_console(suite_repository, testsuite_filepath,
                                   junit_resultfile)

    # Prints the path of result summary file at the beginning of execution
    if data_repository['war_file_type'] == "Suite":
        filename = os.path.basename(testsuite_filepath)
        html_filepath = os.path.join(
            suite_repository['suite_execution_dir'],
            Utils.file_Utils.getNameOnly(filename)) + '.html'
        print_info("HTML result file: {0}".format(html_filepath))
    if not from_project:
        data_repository["war_parallel"] = False

    root = Utils.xml_Utils.getRoot(testsuite_filepath)
    suite_global_xml = root.find('Details')
    runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(
        suite_global_xml)

    # get testwrapper file details
    testwrapperfile, j_data_type, j_runtype, setup_on_error_action = \
        get_testwrapper_file_details(testsuite_filepath, data_repository)
    setup_tc_status, cleanup_tc_status = True, True
    # execute setup steps defined in the testwrapper file, if one is present
    if testwrapperfile:
        print_info(
            "*****************TESTWRAPPER SETUP EXECUTION START*********************"
        )
        data_repository['suite_testwrapper_file'] = testwrapperfile
        data_repository['wt_data_type'] = j_data_type
        setup_tc_status, data_repository = testcase_driver.execute_testcase(testwrapperfile,\
                                            data_repository, tc_context='POSITIVE',\
                                            runtype=j_runtype,\
                                            tc_parallel=None, queue=None,\
                                            auto_defects=auto_defects, suite=None,\
                                            jiraproj=None, tc_onError_action='ABORT_AS_ERROR',\
                                            iter_ts_sys=None, steps_tag='Setup')
        print_info(
            "*****************TESTWRAPPER SETUP EXECUTION END**********************"
        )
    if setup_on_error_action == 'next' or \
            (setup_on_error_action == 'abort' and setup_tc_status is True):
        if execution_type.upper() == 'PARALLEL_TESTCASES':
            ts_junit_object.remove_html_obj()
            data_repository["war_parallel"] = True
            print_info("Executing testcases in parallel")
            test_suite_status = parallel_testcase_driver.main(
                testcase_list,
                suite_repository,
                data_repository,
                from_project,
                tc_parallel=True,
                auto_defects=auto_defects)

        elif execution_type.upper() == 'SEQUENTIAL_TESTCASES':
            if runmode is None:
                print_info("Executing testcases sequentially")
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list,
                    suite_repository,
                    data_repository,
                    from_project,
                    auto_defects=auto_defects)

            elif runmode.upper() == "RUF":
                print_info("Execution type: {0}, Attempts: {1}".format(
                    runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list,
                        suite_repository,
                        data_repository,
                        from_project,
                        auto_defects=auto_defects)
                    test_count = i * len(testcase_list)
                    testsuite_status_list.append(test_suite_status)
                    testsuite_utils.pSuite_update_suite_tests(str(test_count))
                    if str(test_suite_status).upper() == "FALSE" or\
                       str(test_suite_status).upper() == "ERROR":
                        break

            elif runmode.upper() == "RUP":
                print_info("Execution type: {0}, Attempts: {1}".format(
                    runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list,
                        suite_repository,
                        data_repository,
                        from_project,
                        auto_defects=auto_defects)
                    test_count = i * len(testcase_list)
                    testsuite_status_list.append(test_suite_status)
                    testsuite_utils.pSuite_update_suite_tests(str(test_count))
                    if str(test_suite_status).upper() == "TRUE":
                        break

            elif runmode.upper() == "RMT":
                print_info("Execution type: {0}, Attempts: {1}".format(
                    runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    # We aren't actually summing each test result here...
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list,
                        suite_repository,
                        data_repository,
                        from_project,
                        auto_defects=auto_defects)
                    testsuite_status_list.append(test_suite_status)
        # The legacy execution_type branches below are kept unmodified to preserve backward compatibility
        elif execution_type.upper() == 'RUN_UNTIL_FAIL' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Max_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(
                execution_type, execution_value))
            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list,
                    suite_repository,
                    data_repository,
                    from_project,
                    auto_defects=auto_defects)
                test_count = i * len(testcase_list)
                testsuite_utils.pSuite_update_suite_tests(str(test_count))
                if str(test_suite_status).upper() == "FALSE" or\
                   str(test_suite_status).upper() == "ERROR":
                    break

        elif execution_type.upper() == 'RUN_UNTIL_PASS' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Max_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(
                execution_type, execution_value))
            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list,
                    suite_repository,
                    data_repository,
                    from_project,
                    auto_defects=auto_defects)
                test_count = i * len(testcase_list)
                testsuite_utils.pSuite_update_suite_tests(str(test_count))
                if str(test_suite_status).upper() == "TRUE":
                    break

        elif execution_type.upper() == 'RUN_MULTIPLE' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Number_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(
                execution_type, execution_value))

            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                # We aren't actually summing each test result here...
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list,
                    suite_repository,
                    data_repository,
                    from_project,
                    auto_defects=auto_defects)

        elif execution_type.upper() == "ITERATIVE_SEQUENTIAL":
            # if execution type is iterative sequential call WarriorCore.Classes.iterative_testsuite
            # class and execute the testcases in iterative sequential fashion on the systems
            print_info("Iterative sequential suite")

            iter_seq_ts_obj = IterativeTestsuite(testcase_list,
                                                 suite_repository,
                                                 data_repository, from_project,
                                                 auto_defects)
            test_suite_status = iter_seq_ts_obj.execute_iterative_sequential()

        elif execution_type.upper() == "ITERATIVE_PARALLEL":
            # if execution type is iterative parallel call WarriorCore.Classes.iterative_testsuite
            # class and execute the testcases in iterative parallel fashion on the systems
            ts_junit_object.remove_html_obj()
            print_info("Iterative parallel suite")
            data_repository["war_parallel"] = True
            iter_seq_ts_obj = IterativeTestsuite(testcase_list,
                                                 suite_repository,
                                                 data_repository, from_project,
                                                 auto_defects)

            test_suite_status = iter_seq_ts_obj.execute_iterative_parallel()

        else:
            print_error("unexpected suite_type received...aborting execution")
            test_suite_status = False

        if runmode is not None:
            test_suite_status = common_execution_utils.compute_runmode_status(
                testsuite_status_list, runmode, suite_global_xml)
    else:
        print_error("Test cases in suite are not executed as setup failed to execute,"\
                    "setup status : {0}".format(setup_tc_status))
        print_error("Steps in cleanup will be executed on besteffort")
        test_suite_status = "ERROR"
    # execute cleanup steps defined in the testwrapper file, if one is present
    if testwrapperfile:
        print_info(
            "*****************TESTWRAPPER CLEANUP EXECUTION START*********************"
        )
        data_repository['wt_data_type'] = j_data_type
        cleanup_tc_status, data_repository = testcase_driver.execute_testcase(testwrapperfile,\
                                                          data_repository, tc_context='POSITIVE',\
                                                          runtype=j_runtype,\
                                                          tc_parallel=None, queue=None,\
                                                          auto_defects=auto_defects, suite=None,\
                                                          jiraproj=None, tc_onError_action=None,\
                                                          iter_ts_sys=None, steps_tag='Cleanup')
        print_info(
            "*****************TESTWRAPPER CLEANUP EXECUTION END*********************"
        )
    print_info("\n")
    suite_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution completed".format(suite_end_time))

    if test_suite_status is True and cleanup_tc_status is True:
        test_suite_status = True
    # set status to WARN if only cleanup fails
    elif test_suite_status is True and cleanup_tc_status is not True:
        print_warning("setting test suite status to WARN as cleanup failed")
        test_suite_status = 'WARN'

    suite_duration = Utils.datetime_utils.get_time_delta(suite_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(suite_duration)
    print_info("Testsuite duration= {0}".format(hms))
    testsuite_utils.update_suite_duration(str(suite_duration))
    if test_suite_status is False and ts_onError_action and \
            ts_onError_action.upper() == 'ABORT_AS_ERROR':
        print_info(
            "Testsuite status will be marked as ERROR as onError action is "
            "set to 'abort_as_error'")
        test_suite_status = "ERROR"
    testsuite_utils.report_testsuite_result(suite_repository,
                                            test_suite_status)

    ts_junit_object = data_repository['wt_junit_object']
    ts_junit_object.update_count(test_suite_status, "1", "pj")
    ts_junit_object.update_count("suites", "1", "pj", "not appicable")
    ts_junit_object.update_attr("status", str(test_suite_status), "ts",
                                suite_timestamp)
    ts_junit_object.update_attr("time", str(suite_duration), "ts",
                                suite_timestamp)

    if not from_project:
        ts_junit_object.update_attr("status", str(test_suite_status), "pj",
                                    "not applicable")
        ts_junit_object.update_attr("time", str(suite_duration), "pj",
                                    "not appicable")
        ts_junit_object.output_junit(data_repository['wt_results_execdir'])

        # Save JUnit/HTML results of the Suite in MongoDB server
        if data_repository.get("db_obj") is not False:
            ts_junit_xml = (data_repository['wt_results_execdir'] + os.sep +
                            ts_junit_object.filename + "_junit.xml")
            data_repository.get("db_obj").add_html_result_to_mongodb(
                ts_junit_xml)
    else:
        # Do not output JUnit result file for parallel suite execution
        if not ts_parallel and not data_repository['war_parallel']:
            # Create and replace existing Project junit file for each suite
            ts_junit_object.output_junit(data_repository['wp_results_execdir'],
                                         print_summary=False)

    if ts_parallel:
        ts_impact = data_repository['wt_ts_impact']
        if ts_impact.upper() == 'IMPACT':
            msg = "Status of the executed suite impacts project result"
        elif ts_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed suite case does not impact project result"
        print_debug(msg)
        # put result into multiprocessing queue and later retrieve in corresponding driver
        queue.put(
            (test_suite_status, ts_impact, suite_timestamp, ts_junit_object))

    return test_suite_status, suite_repository
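
The RUF/RUP/RMT runmode loops above share one retry skeleton: run up to N attempts, record each status, and break early on the first failure (run-until-fail) or the first pass (run-until-pass), while run-multiple-times always exhausts its attempts. A condensed sketch of that skeleton (run_with_runmode and executor are illustrative names; executor stands in for sequential_testcase_driver.main):

def run_with_runmode(executor, runmode, attempts):
    status_list = []
    for attempt in range(1, int(attempts) + 1):
        status = executor(attempt)
        status_list.append(status)
        if runmode.upper() == "RUF" and \
                str(status).upper() in ("FALSE", "ERROR"):
            break  # run until fail: stop on the first failure
        if runmode.upper() == "RUP" and str(status).upper() == "TRUE":
            break  # run until pass: stop on the first pass
        # RMT (run multiple times) never breaks early
    return status_list

# e.g. stop on the first passing attempt:
print(run_with_runmode(lambda attempt: attempt == 2, "RUP", 5))  # [False, True]
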
Example n. 28
def connect_telnet(ip,
                   port="23",
                   username="",
                   password="",
                   logfile=None,
                   timeout=60,
                   prompt=".*(%|#|\$)",
                   conn_options="",
                   custom_keystroke="",
                   escape="",
                   **kwargs):
    """
    Initiates Telnet connection via a specific port. Creates log file.

    :Arguments:
        1. ip = destination ip
        2. port(string) = telnet port
        3. username(string) = username
        4. password(string) = password
        5. logfile(string) = logfile name
        6. timeout(int) = timeout duration
        7. prompt(string) = destination prompt

    :Returns:
        1.telnet session as object
        2.conn_string(pre and post login message)
    """
    print_warning("This method is obsolete and will be deprecated soon. Please"
                  " use 'connect_telnet' method of 'PexpectConnect' class "
                  "in 'warrior/Framework/ClassUtils/warrior_connect_class.py'")

    conn_options = "" if conn_options is False or conn_options is None else conn_options
    custom_keystroke = "wctrl:M" if not custom_keystroke else custom_keystroke
    print_debug("timeout is: %s" % timeout)
    print_debug("port num is: %s" % port)
    command = 'telnet ' + ip + ' ' + port + str(conn_options)
    print_debug("connectTelnet cmd = %s" % command)
    child = pexpect_spawn_with_env(pexpect,
                                   command,
                                   timeout=int(timeout),
                                   escape=escape,
                                   env={"TERM": "dumb"})
    conn_string = ""
    telnetobj = None
    try:
        child.logfile = open(logfile, "a")
    except Exception:
        child.logfile = None

    try:
        flag = True
        child.setecho(False)
        child.delaybeforesend = .5
        while True:
            result = child.expect([
                prompt, '.*(?i)password:.*', ".*(?i)(user(name)?:|login:) *$",
                pexpect.EOF, pexpect.TIMEOUT
            ])
            if result == 0:
                telnetobj = child
                conn_string = conn_string + child.before + child.after
                break
            elif result == 1:
                child.sendline(password)
                conn_string = conn_string + child.before + child.after
            elif result == 2:
                child.sendline(username)
            elif result == 3:
                pNote("Connection failed: {0}, with the system response: {1}"\
                      .format(command, child.before), "error")
                break
            elif result == 4:
                # timed out; try once with a keystroke since some terminals
                # expect one before showing the prompt
                if flag is True:
                    pNote("Initial timeout occurred, sending custom_keystroke")
                    _send_cmd_by_type(child, custom_keystroke)
                    flag = False
                    continue
                pNote("Connection timed out: {0}, expected prompt: {1} "\
                      "is not found in the system response: {2}"\
                      .format(command, prompt, child.before), "error")
                break
    except Exception as exception:
        print_error(" ! could not connect to %s...check logs" % ip)
        print_exception(exception)
    return telnetobj, conn_string
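
A hedged usage sketch of connect_telnet; the host, credentials and log path below are placeholders, and the prompt pattern matches the function's default:

telnetobj, conn_string = connect_telnet("192.0.2.10", port="23",
                                        username="admin", password="secret",
                                        logfile="/tmp/telnet.log", timeout=30)
if telnetobj is not None:
    # The returned object is a live pexpect session.
    telnetobj.sendline("show version")
    telnetobj.expect(r".*(%|#|\$)")
    print(telnetobj.before)
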
Example n. 29
def connect_ssh(ip,
                port="22",
                username="",
                password="",
                logfile=None,
                timeout=60,
                prompt=".*(%|#|\$)",
                conn_options="",
                custom_keystroke="",
                escape="",
                **kwargs):
    """
    - Initiates SSH connection via a specific port. Creates log file.
    - return session as object and conn_string(pre and post login message).
    """
    print_warning("This method is obsolete and will be deprecated soon. Please"
                  " use 'connect_ssh' method of 'PexpectConnect' class "
                  "in 'warrior/Framework/ClassUtils/warrior_connect_class.py'")

    sshobj = None
    conn_string = ""
    conn_options = "" if conn_options is False or conn_options is None else conn_options
    custom_keystroke = "wctrl:M" if not custom_keystroke else custom_keystroke
    # options such as "-o StrictHostKeyChecking=no" are expected to be
    # passed in via conn_options
    command = 'ssh -p {0} {1}@{2} {3}'.format(port, username, ip, conn_options)
    print_debug("connectSSH: cmd = %s" % command)
    if WarriorCliClass.cmdprint:
        pNote(("connectSSH: :CMD: %s" % command))
        return None, ""
    child = pexpect_spawn_with_env(pexpect,
                                   command,
                                   timeout=int(timeout),
                                   escape=escape,
                                   env={"TERM": "dumb"})

    child.logfile = sys.stdout

    if logfile is not None:
        try:
            child.logfile = open(logfile, "a")
        except Exception as exception:
            print_exception(exception)

    try:
        flag = True
        child.setecho(False)
        child.delaybeforesend = .5
        while True:
            result = child.expect([
                "(yes/no)", prompt, '.*(?i)password:.*',
                ".*(?i)(user(name)?:|login:) *$", pexpect.EOF, pexpect.TIMEOUT,
                '.*(?i)remote host identification has '
                'changed.*'
            ])

            if result == 0:
                child.sendline('yes')
            elif result == 1:
                sshobj = child
                conn_string = conn_string + child.before + child.after
                break
            elif result == 2:
                child.sendline(password)
                conn_string = conn_string + child.before + child.after
            elif result == 3:
                child.sendline(username)
            elif result == 4:
                pNote("Connection failed: {0}, with the system response: {1}"\
                      .format(command, child.before), "error")
                break
            elif result == 5:
                # some terminals expect a specific keystroke before showing
                # the login prompt
                if flag is True:
                    pNote("Initial timeout occurred, sending custom_keystroke")
                    _send_cmd_by_type(child, custom_keystroke)
                    flag = False
                    continue
                pNote("Connection timed out: {0}, expected prompt: {1} "\
                      "is not found in the system response: {2}"\
                      .format(command, prompt, child.before), "error")
                break
            elif result == 6:
                cmd = "ssh-keygen -R " + ip if port == '22' else \
                      "ssh-keygen -R " + "[" + ip + "]:" + port
                print_debug("SSH Host Key is changed - Remove it from "
                            "known_hosts file : cmd = %s" % cmd)
                subprocess.call(cmd, shell=True)
                child = pexpect_spawn_with_env(pexpect,
                                               command,
                                               timeout=int(timeout),
                                               escape=escape,
                                               env={"TERM": "dumb"})
                print_debug("ReconnectSSH: cmd = %s" % command)
    except Exception as exception:
        print_exception(exception)
    return sshobj, conn_string
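
The stale-host-key branch above (result == 6) is worth isolating: when ssh reports that the remote host identification has changed, the offending known_hosts entry is removed and the connection is retried. A standalone sketch of just that recovery step (forget_host_key is a hypothetical name; host and port are placeholders):

import subprocess

def forget_host_key(ip, port="22"):
    # Non-default ports are stored as "[ip]:port" in known_hosts, matching
    # the ssh-keygen -R command built in connect_ssh above.
    target = ip if port == "22" else "[{0}]:{1}".format(ip, port)
    subprocess.call(["ssh-keygen", "-R", target])

forget_host_key("192.0.2.10", port="2222")
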
Example n. 30
def send_commands_from_testdata(testdatafile, obj_session, **args):
    """
    - Parses the testdata file and gets the command details
    for rows marked execute=yes and row=str_rownum.
    - Sends the obtained commands to the pexpect session (obj_Session).
    - If the commands have verification attribute set,
    then verifies the verification text for presence/absence as defined
    in the respective found attribute in the testdatfile.

    :Arguments:
        1. testdatafile = the xml file where command details are available
        2. obj_session = pexpect session object
        3. logfile = logfile of the pexpect session object.
        4. varconfigfile=  xml file from which the values will be taken for subtitution
        5. var_sub(string) = the pattern [var_sub] in the testdata commands,
                                 start_prompt, end_prompt, verification search
                                 will substituted with this value.
        6. args = Optional filter to specify title/rownum
    :Returns:
        1. finalresult = boolean
    """
    responses_dict = {}
    varconfigfile = args.get('varconfigfile', None)
    datafile = args.get("datafile", None)
    var_sub = args.get('var_sub', None)
    title = args.get('title', None)
    row = args.get('row', None)
    if WarriorCliClass.cmdprint:
        pNote("**************{}**************".format('Title: ' + title))
        if row:
            pNote("**************{}**************".format('Row: ' + row))
    system_name = args.get("system_name")
    session_name = args.get("session_name")
    if session_name is not None:
        system_name = system_name + "." + session_name
    testdata_dict = data_Utils.get_command_details_from_testdata(
        testdatafile,
        varconfigfile,
        var_sub=var_sub,
        title=title,
        row=row,
        system_name=system_name,
        datafile=datafile)
    finalresult = len(testdata_dict) > 0
    for key, details_dict in testdata_dict.items():
        response_dict = {}
        responses_dict[key] = ""
        command_list = details_dict["command_list"]
        stepdesc = "Send the following commands: "
        pNote(stepdesc)
        for n, commands in enumerate(command_list):
            pNote("Command #{0}\t: {1}".format((n + 1), commands))
        intsize = len(command_list)
        if intsize == 0:
            finalresult = False

        # Send Commands
        for i in range(0, intsize):
            print_info("")
            print_debug(">>>")
            command = details_dict["command_list"][i]
            pNote("Command #{0}\t: {1}".format(str(i + 1), command))
            new_obj_session, system_name, details_dict = \
                _get_obj_session(details_dict, obj_session,
                                 system_name, index=i)
            if new_obj_session:
                result, response = _send_cmd_get_status(
                    new_obj_session,
                    details_dict,
                    index=i,
                    system_name=system_name)
                result, response = _send_command_retrials(
                    new_obj_session,
                    details_dict,
                    index=i,
                    result=result,
                    response=response,
                    system_name=system_name)
                response_dict = _get_response_dict(details_dict, i, response,
                                                   response_dict)
                print_debug("<<<")
            else:
                finalresult = "ERROR"
                pNote("COMMAND STATUS:{0}".format(finalresult))
                print_debug("<<<")
                continue

            if result == "ERROR" or finalresult == "ERROR":
                result = "ERROR"
                finalresult = "ERROR"
            finalresult = finalresult and result
        responses_dict[key] = response_dict
    return finalresult, responses_dict
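
A hedged usage sketch of send_commands_from_testdata; the testdata path and filter values are placeholders, and obj_session is assumed to be a pexpect session returned by connect_ssh or connect_telnet above:

finalresult, responses_dict = send_commands_from_testdata(
    "Testdata/demo_testdata.xml",
    obj_session,
    system_name="dut1",
    title="bringup",
    row="1")
print("overall result: {0}".format(finalresult))
for key, response in responses_dict.items():
    # Each entry maps a testdata block to the responses captured for it.
    print(key, response)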