def get_log_files(self):
        """Resolve the execution logs directory and create the log file.

        Returns:
            tuple: (logfile, logsdir, logs_execdir) where logfile is the
            created log file path, logsdir its parent directory and
            logs_execdir the per-execution directory.
        """

        if self.logs_startdir is not None:
            # Logs start dir was supplied explicitly; when it matches the
            # results start dir, reuse the already-created results execution
            # dir so logs and results are co-located.
            if self.logs_startdir == self.res_startdir:
                logs_execdir = self.results_execdir
            else:
                logs_execdir = file_Utils.createDir_addtimestamp(
                    self.logs_startdir, self.nameonly)
            logfile = self.get_exec_file_by_type("Logs", logs_execdir)

        elif self.logs_startdir is None:
            colocate = False
            # Fall back to <Logsdir>/<Resultsdir> from the 'Details' section
            # of the test xml file.
            logs_location = xml_Utils.getChildTextbyParentTag(
                self.filepath, 'Details', 'Logsdir')
            results_location = xml_Utils.getChildTextbyParentTag(
                self.filepath, 'Details', 'Resultsdir')
            # get default logs and results directory
            #default_xml = Tools.__path__[0] + os.sep + 'w_settings.xml'
            # NOTE(review): assumes the WAR_TOOLS_DIR env var is set --
            # os.getenv returns None otherwise and the concatenation raises.
            default_xml = os.getenv(
                "WAR_TOOLS_DIR") + os.sep + 'w_settings.xml'
            default_logsdir = get_credentials(default_xml, 'def_dir',
                                              ['Logsdir'], 'Setting')
            default_resultsdir = get_credentials(default_xml, 'def_dir',
                                                 ['Resultsdir'], 'Setting')
            # use the default directory if user didn't define it in test case/test suite/project
            if results_location is None or results_location is False:
                if default_resultsdir['Resultsdir'] is not None:
                    results_location = default_resultsdir['Resultsdir']

            if logs_location is None or logs_location is False:
                if default_logsdir['Logsdir'] is not None:
                    logs_location = default_logsdir['Logsdir']

            # No usable logs location anywhere: fall back to the default
            # execution dir (warriorspace).
            if logs_location is None or logs_location is False \
                    or str(logs_location).strip() == "":
                logs_execdir = self.create_def_exec_dir()
                logfile = self.get_exec_file_by_type('Logs', logs_execdir)

            elif logs_location is not None and logs_location is not False:
                # Resolve both locations relative to the test xml file and
                # co-locate logs with results when they point at the same dir.
                logs_location_rel = str(logs_location).strip()
                logs_location = file_Utils.getAbsPath(
                    logs_location_rel, os.path.dirname(self.filepath))
                results_location_rel = str(results_location).strip()
                results_location = file_Utils.getAbsPath(
                    results_location_rel, os.path.dirname(self.filepath))
                if logs_location == results_location:
                    colocate = True

                logfile, logs_execdir = self.checkdir_create_file(
                    logs_location, 'Logs', colocate)

        # print "printing logs_execdir: ", logs_execdir
        logsdir = os.path.dirname(logfile)
        return logfile, logsdir, logs_execdir
Ejemplo n.º 2
0
    def __check_dir_exists(cls, filepath, dirtag):
        """ Verify that the directory exists in the path provided.

        Arguments:
              1. filepath: file path will be parsed as input for checking
                 directories
              2. dirtag: directory tag that used to get directory path
        """

        dirt = xml_Utils.getChildTextbyParentTag(filepath, 'Details', dirtag)
        directory = file_Utils.getAbsPath(dirt, os.path.dirname(filepath))

        if directory is not False and directory is not None:
            print_info("{0} path {1}".format(dirtag, directory))
            if not os.path.exists(directory):
                print_warning("Directory does not exist in location {0}."\
                "\nWarrior framework will try to create the directory, if creation "\
                "fails then default warriorspace will be used to collect logs/results"\
                .format(directory))
        else:
            # BUGFIX: was `dirtag is 'Resultsdir'` -- identity comparison with
            # a string literal depends on CPython interning and is not a
            # reliable equality test; use `==` for value comparison.
            if dirtag == 'Resultsdir':
                print_info(
                    "Default directory in Warriorspace will be used to collect results"
                )
            else:
                print_info(
                    "Default directory in Warriorspace will be used to collect logs"
                )
def main(parameter_list):
    """Check the validity of testcase/testsuite/project xml files.

    Returns True only when every provided file is an xml file that passes
    the Warrior pre-run (ironclaw) validation.
    """
    print_info("=" * 10 + "PRE-RUN XML VALIDATION" + "=" * 10 + "\n")
    valid = True
    if not parameter_list:
        print_error("No input files provided to be validated")
        valid = False
    else:
        for parameter in parameter_list:
            # only xml files can be validated
            if file_Utils.get_extension_from_path(parameter) != '.xml':
                print_error(
                    "Provided file '{0}' is not an xml file".format(parameter))
                continue
            abs_filepath = file_Utils.getAbsPath(parameter, os.curdir)
            res = iron_claw_warrior_xml_files(abs_filepath)
            result = testcase_Utils.convertLogic(res)
            valid &= res
            print_info("File '{0}' '{1}ED' Warrior prerun"
                       "validation".format(abs_filepath, result))
    print_info('\n')
    print_info("Validation Completed:")
    if valid:
        print_info("Files are compatible with WARRIOR \n")
    else:
        print_error("Files failed Warrior Ironclaw validation\n")
    return valid
def resolve_value_of_cookies(element):
    """ This function evaluates user input for cookies. If a file path is given,
     then a cookiejar object is created and the contents of the file are loaded
     into the object. This object is then returned.

    Else, a dictionary would be created out of the string given
    input = "foo=foo1; bar=bar1; ; =foobar; barfoo="
    return value = {'foo': 'foo1', 'bar': 'bar1'}

    If the dictionary is empty at the end of the function, None is returned.
    """
    if element is None or element is False or element == "":
        return None
    abs_path = file_Utils.getAbsPath(element, sys.path[0])
    if not os.path.exists(abs_path):
        # not a file on disk: treat the value as a cookie string
        return convert_string_to_dict(element)
    # BUGFIX: the jar was previously built from the raw `element` path even
    # though existence was verified on `abs_path`; loading then failed
    # whenever the path was relative to sys.path[0] but not to the cwd.
    cookie_jar = http.cookiejar.LWPCookieJar(abs_path)
    try:
        cookie_jar.load()
    except http.cookiejar.LoadError:
        pNote("Cookies could not be loaded from {}.".format(abs_path),
              "error")
        cookie_jar = None
    except Exception as e:
        pNote("An Error Occurred: {0}".format(e), "error")
    return cookie_jar
def get_all_file_paths(element):
    """ This function evaluates the value of the files tag and returns
    validated data.

        input: string

        path/to/file1.txt, path/to/file2.txt

        output: dict

        {
        file1.txt: open file1.txt (in binary mode),
        file2.txt: open file2.txt (in binary mode),
        }

    """
    final_dict = {}
    if element is not None and element is not False and element != "":
        abs_path = file_Utils.getAbsPath(element, sys.path[0])
        if os.path.exists(abs_path):
            # BUGFIX: open the resolved absolute path -- existence was
            # verified on abs_path, but the raw value was opened before,
            # which failed for paths relative to sys.path[0] but not cwd.
            final_dict[os.path.basename(os.path.normpath(element))] = \
                open(abs_path, 'rb')
        else:
            pNote("{0} doesn't exist!".format(element), "error")
    else:
        if element == "":
            pNote("File path cannot be empty!", "error")
        else:
            pNote("File path cannot be {0}!".format(element), "error")
    return final_dict
    def convert_arg_to_datatype(self):
        """Decide the requested data type from the argument name prefix.

        The leading prefix of the argument name selects the conversion:
        str_, int_, float_, bool_, list_, tuple_, dict_ or file_.  Names
        without a recognized prefix (and str_ names) are returned unchanged
        and treated as plain strings.
        """
        self.datatype = None
        # prefix -> target-type table for the straightforward conversions
        simple_types = {'int_': int, 'float_': float, 'list_': list,
                        'tuple_': tuple, 'dict_': dict}
        if self.arg_name.startswith('str_'):
            return self.arg_value
        if self.arg_name.startswith('bool_'):
            self.datatype = bool
            # normalize case so the later string->bool conversion succeeds
            lowered = self.arg_value.lower()
            if lowered == "true":
                self.arg_value = "True"
            elif lowered == "false":
                self.arg_value = "False"
        elif self.arg_name.startswith('file_'):
            self.datatype = IOBase
            tc_path = config_Utils.tc_path
            fname = file_Utils.getAbsPath(self.arg_value, tc_path)
            try:
                self.arg_value = open(fname)
            except IOError:
                print_warning("given file {} does not exist, please check, it "
                              "should be relative to testcase path {}".format(
                                  fname, tc_path))
        else:
            for prefix, target in simple_types.items():
                if self.arg_name.startswith(prefix):
                    self.datatype = target
                    break
            else:
                # no recognized prefix: treat the value as a plain string
                return self.arg_value
        if self.datatype is not None:
            convert_msg = "Input argument {0} will be converted to a {1}".format(
                self.arg_name, self.datatype)
            print_debug(convert_msg)

        return self.convert_string_to_datatype()
    def get_result_files(self):
        """Resolve the execution results directory and create the result file.

        Returns:
            tuple: (resultfile, resultsdir, results_execdir) where
            resultfile is the created xml result file path, resultsdir its
            parent directory and results_execdir the per-execution directory.
        """

        if self.res_startdir is not None:
            # Results start dir supplied explicitly: validate (or create) it
            # and abort the whole run if that is not possible.
            try:
                if not os.path.exists(self.res_startdir):
                    print_warning(
                        "Given output dir does not exists, creating output dir at {0}"
                        .format(self.res_startdir))
                    os.makedirs(self.res_startdir)
                elif os.path.exists(self.res_startdir):
                    # existing dir must be readable, writable and traversable
                    if not (os.access(self.res_startdir, os.R_OK)
                            and os.access(self.res_startdir, os.W_OK) and
                            os.access(self.res_startdir, os.X_OK | os.W_OK)):
                        raise Exception("Permission Denied")
            except Exception as e:
                print_error(
                    str(e) + ": Can not create an output dir " +
                    self.res_startdir)
                exit(1)
            else:
                results_execdir = file_Utils.createDir_addtimestamp(
                    self.res_startdir, self.nameonly)
                rfile = self.get_exec_file_by_type("Results", results_execdir)
        elif self.res_startdir is None:
            # Fall back to <Resultsdir> from the 'Details' section of the xml.
            results_location = xml_Utils.getChildTextbyParentTag(
                self.filepath, 'Details', 'Resultsdir')

            # get default results directory
            # default_xml = Tools.__path__[0] + os.sep + 'w_settings.xml'
            # NOTE(review): assumes the WAR_TOOLS_DIR env var is set --
            # os.getenv returns None otherwise and the concatenation raises.
            default_xml = os.getenv(
                "WAR_TOOLS_DIR") + os.sep + 'w_settings.xml'
            default_resultsdir = get_credentials(default_xml, 'def_dir',
                                                 ['Resultsdir'], 'Setting')
            # use the default directory if user didn't define it in test case/test suite/project
            if results_location is None or results_location is False:
                if default_resultsdir['Resultsdir'] is not None:
                    results_location = default_resultsdir['Resultsdir']

            # No usable results location anywhere: use default warriorspace.
            if results_location is None or results_location is False \
                    or str(results_location).strip() == "":
                results_execdir = self.create_def_exec_dir()  # proj_exec_dir
                rfile = self.get_exec_file_by_type("Results", results_execdir)

            elif results_location is not None and results_location is not False:
                # Resolve the location relative to the test xml file.
                results_location_rel = str(results_location).strip()
                results_location = file_Utils.getAbsPath(
                    results_location_rel, os.path.dirname(self.filepath))
                rfile, results_execdir = self.checkdir_create_file(
                    results_location, "Results")

        # print "printing results_execdir: ", results_execdir
        resultfile = file_Utils.getNewExtension(rfile, "xml")
        resultsdir = os.path.dirname(resultfile)
        return resultfile, resultsdir, results_execdir
def just_a_tuple(element):
    """ This function evaluates the value of the files tag and returns
    validated data.

        input: string

        (path/to/file4.txt;content_type), (path/to/file1.txt;content_type)

        output: dict

        {
        file4.txt: (file4.txt, open file4.txt (in binary mode), content_type)
        file1.txt: (file1.txt, open file1.txt (in binary mode), content_type)
        }

    """
    final_dict = {}
    # Guard first: the original called .strip() before validating, which
    # raised AttributeError on None/False input despite the later check.
    if element is None or element is False or element == "":
        if element == "":
            pNote("File path cannot be empty!", "error")
        else:
            pNote("File path cannot be {0}!".format(element), "error")
        return final_dict
    element = element.strip(")")
    element = element.strip("(")
    if element == "":
        pNote("File path cannot be empty!", "error")
        return final_dict
    parts = [part.strip() for part in element.split(";")]
    path = parts[0]
    if path == "":
        # BUGFIX: the original compared the split LIST against "" here, so an
        # empty path could never reach the "cannot be empty" message.
        pNote("File path cannot be empty!", "error")
    else:
        abs_path = file_Utils.getAbsPath(path, sys.path[0])
        if os.path.exists(abs_path):
            entry = [os.path.basename(os.path.normpath(path)),
                     open(abs_path, 'rb')]
            # optional second field is the content type
            if len(parts) > 1 and parts[1] != "":
                entry.append(parts[1])
            final_dict[entry[0]] = tuple(entry)
        else:
            pNote("{0} doesn't exist!".format(path), "error")
    return final_dict
 def check_defect_file(path):
     """Return the absolute path of a defect json file, or None.

     Only paths ending in '.json' are considered; the path is resolved
     relative to the current working directory and must exist on disk.
     """
     if not path.endswith(".json"):
         return None
     abs_cur_dir = os.path.abspath(os.curdir)
     defect_file = file_Utils.getAbsPath(path, abs_cur_dir)
     if not file_Utils.fileExists(defect_file):
         print_error("File Does not exist in provided location: "\
                     "{0} relative to cwd".format(path))
         return None
     print_info("Defect file location is :{0}".format(defect_file))
     return defect_file
    def pc_replacement(self, system_name):
        """
        Verify lab PC is current if less than 4 years old, otherwise
        a replacement is required.

        Returns:
            tuple: (status, output_dict) -- status is True only when every
            (sub)system passes the age check.
        """
        wdesc = "Check if lab PC is current or need replacement"
        #Resolve system_name and subsystem_list
        system_name, subsystem_list = Utils.data_Utils.resolve_system_subsystem_list(
            self.datafile, system_name)
        output_dict = {}
        status = True
        # one pass when there are no subsystems, else one per subsystem
        attempt = 1 if subsystem_list is None else len(subsystem_list)
        for i in range(attempt):
            Utils.testcase_Utils.pSubStep(wdesc)
            #Get name from the list when it's not 'None', otherwise, set it to 'None'
            subsystem_name = subsystem_list[
                i] if subsystem_list is not None else None
            call_system_name = system_name if subsystem_name is None \
            else "{0}[{1}]".format(system_name, subsystem_name)
            credentials = get_credentials(self.datafile, call_system_name,
                                          ['dom', 'user', 'os', 'testdata'])
            pNote("system={0}".format(call_system_name))
            #Demo Framework testdata capability
            # NOTE(review): credentials["testdata"] is read before the
            # validity check below; a failed lookup would raise here --
            # preserved from the original flow, confirm intended.
            testdatafile = file_Utils.getAbsPath(
                credentials["testdata"], os.path.dirname(self.datafile))
            add_info = Utils.xml_Utils.getElementWithTagAttribValueMatch(testdatafile, \
                       'add_info', 'name', 'testdata')
            if add_info is not None:
                info_text = Utils.xml_Utils.get_text_from_direct_child(
                    add_info, 'info')
                pNote(info_text)
                pNote(testdatafile)

            # BUGFIX: `result` was unbound when the credentials lookup failed,
            # raising NameError at report_substep_status; default to a failed
            # substep instead.
            result = False
            if credentials is not None and credentials is not False:
                num_of_year = 4
                date_of_mfg = credentials["dom"]
                pass_msg = "Lab PC {0} is current, it's less than 4 years old."\
                           " A replacement is NOT required."\
                           .format(call_system_name)
                fail_msg = "Lab PC {0} is NOT current, it's more than than 4 "\
                           "years old. Please schedule for a replacement."\
                           .format(call_system_name)
                result = Utils.demo_utils.lab_eqpt_status(
                    date_of_mfg, num_of_year, pass_msg, fail_msg)

            Utils.data_Utils.update_datarepository(output_dict)
            Utils.testcase_Utils.report_substep_status(result)
            status = status and result

        return status, output_dict
    def testset_calibration(self, system_name):
        """
        Check if the test set calibration is current if less than 1 year old,
        otherwise, re-calibration is required.

        Returns:
            tuple: (status, output_dict) -- status is True only when every
            (sub)system passes the calibration-age check.
        """
        wdesc = "Check if Lab Test set calibration is current."
        #Resolve system_name and subsystem_list
        system_name, subsystem_list = Utils.data_Utils.resolve_system_subsystem_list(
            self.datafile, system_name)
        output_dict = {}
        status = True
        # one pass when there are no subsystems, else one per subsystem
        attempt = 1 if subsystem_list is None else len(subsystem_list)
        for i in range(attempt):
            Utils.testcase_Utils.pSubStep(wdesc)
            #Get name from the list when it's not 'None', otherwise, set it to 'None'
            subsystem_name = subsystem_list[
                i] if subsystem_list is not None else None
            call_system_name = system_name if subsystem_name is None \
            else "{0}[{1}]".format(system_name, subsystem_name)
            credentials = get_credentials(self.datafile, call_system_name, \
                                ['calibration', 'user', 'location', 'testdata'])
            pNote("system={0}".format(call_system_name))
            #Demo Framework testdata capability
            # NOTE(review): credentials["testdata"] is read before the
            # validity check below; a failed lookup would raise here --
            # preserved from the original flow, confirm intended.
            testdatafile = file_Utils.getAbsPath(
                credentials["testdata"], os.path.dirname(self.datafile))
            add_info = Utils.xml_Utils.getElementWithTagAttribValueMatch(testdatafile, \
                       'add_info', 'name', 'testdata')
            if add_info is not None:
                info_text = Utils.xml_Utils.get_text_from_direct_child(
                    add_info, 'info')
                pNote(info_text)
                pNote(testdatafile)

            # BUGFIX: `result` was unbound when the credentials lookup failed,
            # raising NameError at report_substep_status; default to a failed
            # substep instead.
            result = False
            if credentials is not None and credentials is not False:
                calibrated_date = credentials["calibration"]
                num_of_year = 1
                pass_msg = "Lab Test set {0} calibration is current, "\
                           "re-calibration is NOT required."\
                           .format(call_system_name)
                fail_msg = "Lab Test set {0} calibration is NOT current, it's "\
                           "more than than 1 year old. Re-calibration is "\
                           "required".format(call_system_name)
                result = Utils.demo_utils.lab_eqpt_status(
                    calibrated_date, num_of_year, pass_msg, fail_msg)

            Utils.data_Utils.update_datarepository(output_dict)
            Utils.testcase_Utils.report_substep_status(result)
            status = status and result

        return status, output_dict
    def check_get_datafile(self):
        """Check InputDataFile tag in the xml file and
        based on the values return the datafile to be used for the testcase/testsuite
            - If user provided a datafile, will use it.
            - If user specified 'Default' will use the default datafile
            - If user did not provide any value will use default datafile
            - If user specified 'NODATA' will print a msg saying so.
        """

        datafile = xml_Utils.getChildTextbyParentTag(self.filepath, 'Details',
                                                     'InputDataFile')
        stripped = str(datafile).strip()
        if datafile is None or datafile is False or stripped == "":
            # nothing provided: pick a default depending on the file type
            if self.filetype == "tc":
                datafile = get_default_xml_datafile(self.filepath)
            if self.filetype == "ts":
                # A suite-level input datafile is supported only for the
                # iterative seq/parallel datatypes; otherwise no datafile.
                datatype = self.check_get_datatype(False)
                if str(datatype).lower().startswith("iterative"):
                    datafile = get_default_xml_datafile(self.filepath)
                else:
                    datafile = False
            elif self.filetype == "proj":
                datafile = False
        elif stripped.upper() == "DEFAULT":
            print_debug(
                "This testcase will be executed using the default InputDataFile"
            )
            datafile = get_default_xml_datafile(self.filepath)
        elif stripped.upper() == 'NO_DATA':
            print_debug('This test case will be run without any InputDataFile')
            datafile = "NO_DATA"
        else:
            # user-provided path: resolve it relative to the xml file
            datafile = file_Utils.getAbsPath(stripped,
                                             os.path.dirname(self.filepath))

        if str(datafile).strip().upper(
        ) != 'NO_DATA' and datafile is not False:
            if not file_Utils.fileExists(datafile):
                print_debug('\n')
                print_error("!!! *** InputDataFile does not exist in provided path:" \
                            "{0} *** !!!".format(datafile))
        return datafile
    def manual_defects(self, paths):
        """Collect defect json files from the given paths and create a jira
        issue for each failure found in them."""
        print_debug("manual-create defects")

        defects_json_list = []
        if self.path_type == "dir":
            # each entry is a directory: scan it for defect json files
            abs_cur_dir = os.path.abspath(os.curdir)
            for count, path in enumerate(paths, 1):
                print_info("Directory {0}: {1}".format(count, path))
                defect_dir = file_Utils.getAbsPath(path, abs_cur_dir)
                if file_Utils.dirExists(defect_dir):
                    for j_file in os.listdir(path):
                        j_file = os.path.join(path, j_file)
                        if j_file is not None:
                            check_file = self.check_defect_file(j_file)
                            if check_file is not None:
                                defects_json_list.append(check_file)
                else:
                    print_error("Directory does not exist in provided path {0} "\
                                "relative to cwd".format(path))
                print_debug("\n")
        else:
            # each entry is a single defect json file
            for count, path in enumerate(paths, 1):
                print_info("File {0}: {1}".format(count, path))
                check_file = self.check_defect_file(path)
                if check_file is not None:
                    defects_json_list.append(check_file)
                print_debug("\n")

        if not defects_json_list:
            print_info("No defect json files found")
            exit(0)
        for j_file in defects_json_list:
            data_repository = self.defects_json_parser(j_file)
            if data_repository is not None:
                data_repository['jiraproj'] = self.jiraproj
                defect_obj = defects_driver.DefectsDriver(data_repository)
                if defect_obj.connect_warrior_jira() is True:
                    defect_obj.create_jira_issues([j_file])
Ejemplo n.º 14
0
    def testsuite_prerun(self, testsuite_filepath, root, check_files_dict=None):
        """Executes prerun of a testsuite file.

        Validates the suite xml against its xsd, checks the suite input
        datafile, then preruns every referenced testcase; returns the
        accumulated boolean status.
        """
        print_debug('\n')
        print_debug('*' * 40)
        print_debug("Validating Test suite xml")
        print_debug('*' * 40)

        suite_xsd = self.xsd_dir + os.sep + 'warrior_suite.xsd'
        testsuite_status = self.xml_to_xsd_validation(testsuite_filepath,
                                                      suite_xsd)
        if not testsuite_status:
            print_error("Incorrect xml format")
        else:
            data_file_valid, check_files_dict = self.check_testsuite_input_datafile(\
                testsuite_filepath, check_files_dict)
            testsuite_status &= data_file_valid
            suite_dir = os.path.dirname(testsuite_filepath)
            for testcase in root.iter('Testcase'):
                tc_path_rel = testsuite_utils.get_path_from_xmlfile(testcase)
                tc_path = file_Utils.getAbsPath(tc_path_rel, suite_dir)
                time.sleep(5)
                print_debug('\n')
                if os.path.isfile(tc_path):
                    print_info('tc_path: {0}, Testcase file exists...'.format(
                        tc_path))
                    tc_status = self.testcase_prerun(tc_path, check_files_dict)
                else:
                    tc_status = False
                    print_error(
                        'tc_path: {0}, Testcase file does not exist'.format(
                            tc_path))
                    print_info('TC STATUS: {0}'.format('FAILED'))
                testsuite_status &= tc_status

        time.sleep(5)
        print_debug('\n')
        status = testcase_Utils.convertLogic(testsuite_status)
        print_info('SUITE STATUS: {0}ED'.format(status))

        return testsuite_status
Ejemplo n.º 15
0
    def project_prerun(self, project_filepath, root):
        """Executes prerun of a project file.

        Validates the project xml against its xsd, then preruns every
        referenced testsuite; returns the accumulated boolean status.
        """

        print_debug('\n')
        print_debug('+' * 40)
        print_debug("Validating Project xml")
        print_debug('+' * 40)
        project_xsd = self.xsd_dir + os.sep + 'warrior_project.xsd'
        project_status = self.xml_to_xsd_validation(project_filepath,
                                                    project_xsd)
        if not project_status:
            print_error("Incorrect xml format")
        else:
            check_files_dict = self.check_proj_results_logsdir(
                project_filepath)
            project_dir = os.path.dirname(project_filepath)
            for testsuite in root.iter('Testsuite'):
                testsuite_path_rel = testsuite_utils.get_path_from_xmlfile(
                    testsuite)
                testsuite_path = file_Utils.getAbsPath(testsuite_path_rel,
                                                       project_dir)
                print_debug('\n')
                if os.path.isfile(testsuite_path):
                    print_info("Testsuite_path: {0}, Testsuite"
                               "file exists...".format(testsuite_path))
                    ts_root = xml_Utils.getRoot(testsuite_path)
                    tsuite_status = self.testsuite_prerun(
                        testsuite_path, ts_root, check_files_dict)
                else:
                    tsuite_status = False
                    print_error('testsuite_path: {0},\
                                Testsuite file does not exist'.format(
                        testsuite_path))
                    print_info('SUITE STATUS: {0}'.format('FAILED'))
                project_status &= tsuite_status

        time.sleep(5)
        print_debug('\n')
        status = testcase_Utils.convertLogic(project_status)
        print_info('PROJECT STATUS: {0}ED'.format(status))

        return project_status
def resolve_value_of_verify(element):
    """Interpret the 'verify' parameter value.

    'yes' (any case) -> True; 'no' (any case) -> False; a path to an
    existing file -> the path itself (unchanged); anything else -> True.
    """
    # empty/None/False input means "verify" by default
    if element is None or element is False or element == "":
        return True
    # an existing file wins over the yes/no interpretation
    abs_path = file_Utils.getAbsPath(element, sys.path[0])
    if os.path.exists(abs_path):
        return element
    if re.match('^yes$', element, re.IGNORECASE):
        return True
    if re.match('^no$', element, re.IGNORECASE):
        return False
    return True
 def verify_json(self, incoming_json, respond_obj, file=False):
     """
         Verify the incoming_json data with either
         a. whole json file comparison (file=True, using the
            "request_verify_data" entries of respond_obj), or
         b. individual key/value pairs ("request_verify" entries).
         :param:
             incoming_json: a json string/json obj
             respond_obj: contains the verification detail from datafile
             file: indicate if comparing whole file or just pairs
         :return:
             True if whole file match/all pairs match
             False if not match
     """
     if isinstance(incoming_json, str):
         incoming_json = json.loads(incoming_json)
     if file:
         # whole-file mode: pass if ANY expected file matches exactly
         for expect_file in respond_obj["request_verify_data"]:
             expect_file = getAbsPath(expect_file,
                                      getDirName(self.datafile))
             expect_json = json.load(open(expect_file))
             if sorted(incoming_json.items()) == sorted(
                     expect_json.items()):
                 return True
         return False
     # pair mode: every listed key path must resolve to the listed value.
     # Entries look like "key=a[b][c],value=<python literal>";
     # [4:] drops the "key=" prefix, [6:] drops "value=".
     for json_pair in respond_obj["request_verify"]:
         json_keys = json_pair.split(",")[0][4:].split("[")
         json_value = literal_eval(json_pair.split(",")[1][6:])
         node = incoming_json
         for json_key in json_keys:
             json_key = json_key.replace("]", "")
             if json_key not in node:
                 return False
             node = node[json_key]
         if node != json_value:
             return False
     return True
    def verify_xml(self, incoming_xml, respond_obj, file=False):
        """
            Verify the incoming_xml data with either
            a. whole xml file comparison (file=True, using the
               "request_verify_data" entries of respond_obj), or
            b. tag/text pairs ("request_verify" entries).
            :param:
                incoming_xml: an xml string
                respond_obj: contains the verification detail from datafile
                file: indicate if comparing whole file or just pairs
            :return:
                True if whole file match/all pairs match
                False if not match
        """
        if file:
            # NOTE(review): status is overwritten each iteration, so only the
            # LAST expected file decides the outcome -- preserved as-is from
            # the original implementation; confirm intended.
            status = False
            for expect_xml_file in respond_obj["request_verify_data"]:
                expect_xml_file = getAbsPath(expect_xml_file,
                                             getDirName(self.datafile))
                status, _, _, _ = compare_xml(incoming_xml,
                                              expect_xml_file,
                                              output_file=False,
                                              sorted_json=False,
                                              remove_namespaces=True)
            return status
        # pair mode: entries look like "key=<xpath>,value=<text>";
        # [4:] drops "key=", [6:] drops "value=".
        parsed_xml = ET.fromstring(incoming_xml,
                                   parser=ET.XMLParser(encoding="utf-8"))
        for element_pair in respond_obj["request_verify"]:
            xpath = element_pair.split(",")[0][4:]
            expected_text = element_pair.split(",")[1][6:]
            found = getChildElementWithSpecificXpath(parsed_xml, xpath)
            if found is None or expected_text != found.text:
                return False

        return True
def get_abs_path_from_start_dir(relative_path, start_directory, extension=".json"):
    """When provided with a start directory and a relative path, this function
    returns the absolute path. Else returns the relative path
    start_directory must be an absolute path

    :param relative_path: path relative to start_directory
    :param start_directory: absolute directory to resolve the path against
    :param extension: unused; kept only for backward compatibility with
        existing callers
    :return: result of file_Utils.getAbsPath(relative_path, start_directory)
    """
    # NOTE(review): `extension` is never used in this body — confirm no
    # caller relies on it before removing the parameter.
    return file_Utils.getAbsPath(relative_path, start_directory)
def dict_in_tuple(element):
    """ This function evaluates the value of the files tag and returned
    validated data.

        input: string

        (path/to/file5.txt;content_type;(header1=value;header2=value2)),
        (path/to/file1.txt;content_type;(header3=value3;header4=value4))

        output: dict

        {
        file5.txt: (file5.txt, open file5.txt (in binary mode), content_type,
        { header1: value, header2: value2 }),
        file1.txt: (file1.txt, open file5.txt (in binary mode), content_type,
        { header3: value3, header4: value4 })
        }

    """
    final_dict = {}
    temp_list = []
    # str.strip treats its argument as a character set, so "))" behaves
    # exactly like ")": trailing/leading ')' then '(' are removed.
    element = element.strip("))")
    element = element.strip("(")
    # NOTE: the `is not None`/`is not False` checks throughout operate on
    # strings, so only the == "" comparisons can actually trigger here.
    if element is not None and element is not False and element != "":
        # Split "path;content_type" (element[0]) from the "(header=value;...)"
        # tail (element[1], when present).
        element = element.split(";(")
        if element[0] is None or element[0] is False:
            pNote("File path cannot be {0}!".format(element), "error")
        elif element[0] == "":
            pNote("File path cannot be empty!", "error")
        else:
            # element[0] becomes [file_path, content_type?] after this split.
            element[0] = element[0].split(";")
            for j in range(0, len(element[0])):
                element[0][j] = element[0][j].strip()
            if element[0][0] is None or element[0][0] is False:
                pNote("File path cannot be {0}!".format(element), "error")
            elif element[0][0] == "":
                pNote("File path cannot be empty!", "error")
            else:
                # Resolve the file path relative to sys.path[0] and collect
                # (basename, open-binary-handle); the handle is intentionally
                # left open — it is returned to the caller for upload.
                abs_path = file_Utils.getAbsPath(element[0][0], sys.path[0])
                if os.path.exists(abs_path):
                    temp_list.append(os.path.basename(os.path.normpath(element[0][0])))
                    temp_list.append(open(abs_path, 'rb'))
                else:
                    pNote("{0} doesn't exist!".format(element[0]), "error")
            # Append the optional content_type and parsed headers dict, using
            # "" as a placeholder content_type when only headers were given.
            if len(element[0]) > 1:
                if element[0][1] is not None and element[0][1] is not False \
                        and element[0][1] != "":
                    temp_list.append(element[0][1])
                    if len(element) > 1:
                        if element[1] is not None and element[1] is not False \
                                and element[1] != "":
                            temp_list.append(convert_string_to_dict(element[1]))
            else:
                if len(element) > 1:
                    if element[1] is not None and element[1] is not False \
                            and element[1] != "":
                        temp_list.append("")
                        temp_list.append(convert_string_to_dict(element[1]))
        # Shape the collected pieces into the tuple the caller expects:
        # (name, handle), (name, handle, content_type) or
        # (name, handle, content_type, headers).
        if temp_list != []:
            if len(temp_list) < 3:
                final_dict[temp_list[0]] = (temp_list[0], temp_list[1])
            elif len(temp_list) > 3:
                final_dict[temp_list[0]] = (temp_list[0], temp_list[1],
                                            temp_list[2], temp_list[3])
            elif len(temp_list) > 2:
                final_dict[temp_list[0]] = (temp_list[0], temp_list[1],
                                            temp_list[2])
    else:
        if element == "":
            pNote("File path cannot be empty!", "error")
        else:
            pNote("File path cannot be {0}!".format(element), "error")
    return final_dict
def dict_with_file_paths(element):
    """ This function evaluates the value of the files tag and returned
    validated data.

        input: string

        file_group_name=path/to/file2.txt; path/to/file3.txt

        output: dict

        {
        file_group_name_1: open file2.txt (in binary mode),
        file_group_name_2: open file3.txt (in binary mode),
        }

    """
    final_dict = {}
    temp_list = []
    if element is not None and element is not False and element != "":
        # element[0] = group name, element[1] = ';'-separated file paths
        element = element.split("=")
        for i in range(0, len(element)):
            element[i] = element[i].strip()
        if len(element) < 2:
            pNote("File paths cannot be empty!", "error")
        else:
            if element[0] == "":
                pNote("File group name cannot be empty!", "error")
            elif element[0] is None or element[0] is False:
                pNote("File group name cannot be {0}!".format(element[0]),
                      "error")
            else:
                if element[1] == "":
                    pNote("File paths cannot be empty!", "error")
                elif element[1] is None or element[1] is False:
                    pNote("File paths cannot be {0}!".format(element[0]),
                          "error")
                else:
                    element[1] = element[1].split(";")
                    for i in range(0, len(element[1])):
                        element[1][i] = element[1][i].strip()
                        if element[1][i] is not None and element[1][i] is not \
                                False and element[1][i] != "":
                            abs_path = file_Utils.getAbsPath(element[1][i], sys.path[0])
                            if os.path.exists(abs_path):
                                # Bug fix: open the resolved absolute path
                                # that was just existence-checked. Previously
                                # the raw (possibly relative) path was opened,
                                # which could raise IOError even though
                                # abs_path exists. The handle is intentionally
                                # left open — it is returned to the caller.
                                temp_list.append(open(abs_path, 'rb'))
                            else:
                                pNote("{0} doesn't exist!".format(element[1][i])
                                      , "error")
                        else:
                            if element[1][i] == "":
                                pNote("File path cannot be empty!", "error")
                            else:
                                pNote("File path cannot be {0}!"
                                      .format(element[1][i]), "error")
        if temp_list != []:
            # Key format: "<group_name>_<1-based index>"
            for i in range(0, len(temp_list)):
                final_dict[element[0] + "_" + str(i+1)] = temp_list[i]
    else:
        if element == "":
            pNote("File group name and corresponding file paths cannot "
                  "be empty!", "error")
        else:
            pNote("File group name and corresponding file paths cannot"
                  " be {0}!".format(element), "error")
    return final_dict
def get_step_list(filepath,
                  step_tag,
                  sub_step_tag,
                  randomize=False,
                  loop_tag="Loop"):
    """
    Takes the location of Testcase/Suite/Project file as input
    Returns a list of all the step/testcase/testsuite elements
    present in the file.

    :Arguments:
        1. filepath     = full path of the Testcase/suite/project xml file
        2. step_tag     = xml tag for group of step in the file
        3. sub_step_tag = xml tag for each step in the file
        4. randomize    = when True, shuffle the step list
                          (Project/TestSuite files only)
        5. loop_tag     = xml tag for loop. Loop by default

    :Returns:
        list of step elements expanded for loops/runmode/retry,
        an empty list when the file has no steps, or
        False when a Loop tag is missing mandatory attributes or its
        json file is invalid.
    """
    step_list_with_rmt_retry = []
    root = Utils.xml_Utils.getRoot(filepath)
    steps = root.find(step_tag)
    if steps is None:
        print_warning("The file: '{0}' has no {1} to be executed".format(
            filepath, step_tag))
        # Bug fix: previously execution fell through to `for child_node in
        # steps`, raising TypeError on None. Nothing to execute — return
        # the empty list.
        return step_list_with_rmt_retry
    step_list = []
    for child_node in steps:
        if child_node.tag == sub_step_tag:
            step_list.append(child_node)
        elif child_node.tag == loop_tag:
            # A Loop tag repeats its child steps once per entry in the
            # referenced json file; `id` selects the loop block, `file`
            # points at the json document.
            loop_count = child_node.get("id")
            if loop_count is None:
                print_error('`id` attribute is mandatory in Loop tag.'
                            ' example : <Loop id="1" file="filename">')
                return False
            json_file = child_node.get("file")
            if json_file is None:
                print_error('`file` attribute is mandatory in Loop tag.'
                            ' example : <Loop id="1" file="filename">')
                return False
            loop_count = loop_count.strip()
            json_file = json_file.strip()
            json_file = Utils.data_Utils.sub_from_env_var(json_file)
            print_info("file is {}".format(json_file))
            loop_steps = child_node.findall(sub_step_tag)
            testcasefile_path = get_object_from_datarepository(
                'wt_testcase_filepath')
            valid_json = True
            try:
                filepath = getAbsPath(json_file,
                                      os.path.dirname(testcasefile_path))
                with open(filepath, "r") as json_handle:
                    json_doc = json.load(json_handle)
                    # A dict top-level document maps loop ids to lists of
                    # argument blobs; narrow to this loop's list.
                    if isinstance(json_doc, dict):
                        json_doc = json_doc[loop_count]
                    loop_json = {loop_count: {"loop_json": json_doc}}
                    update_datarepository(loop_json)
                    update_datarepository({"loopid": loop_count})
                    # Validate the document shape: a list of dicts, or a
                    # dict of loop-id -> list of dicts.
                    if not isinstance(json_doc, (list, dict)):
                        valid_json = False
                        print_error(
                            'invalid json format specified,'
                            'valid format : [{"arg1":"value"}, {"arg2":"value"}]'
                            'or {"loop_id_1":[{"arg1":"value"}],'
                            ' "loop_id_2": [{"arg1":"value"}]}')
                    else:
                        if isinstance(json_doc, list):
                            for blob in json_doc:
                                if not isinstance(blob, dict):
                                    valid_json = False
                                    print_error(
                                        "element is {}. should be dict".format(
                                            type(blob)))
                                    print_error(
                                        'invalid json format specified,'
                                        'blob should be dict, valid format : '
                                        '[{"arg1":"value"}, '
                                        '{"arg2":"value"}]')
                        elif isinstance(json_doc, dict):
                            for each_loop_id in json_doc.keys():
                                if isinstance(json_doc[each_loop_id], list):
                                    for blob in json_doc[each_loop_id]:
                                        if not isinstance(blob, dict):
                                            valid_json = False
                                            print_error(
                                                "element is {}. should be dict"
                                                .format(type(blob)))
                                            print_error(
                                                'invalid json format specified,'
                                                'blob should be dict, valid format : '
                                                '{"loop_id_1":[{"arg1":"value"}, '
                                                '{"arg2":"value"}], '
                                                '"loop_id_2":[{"arg1":"value"},'
                                                ' {"arg2":"value"}]}')
                                else:
                                    valid_json = False
                                    print_error(
                                        "element is {}. should be list".format(
                                            type(json_doc[each_loop_id])))
                                    print_error(
                                        'invalid json format specified,'
                                        'blob should be dict,'
                                        ' valid format : [{"arg1":"value"},'
                                        ' {"arg2":"value"}]')
            except ValueError:
                valid_json = False
                print_error('The file {0} is not a valid json '
                            'file'.format(filepath))
            except IOError:
                valid_json = False
                print_error('The file {0} does not exist'.format(filepath))
            except Exception as error:
                valid_json = False
                print_error('Encountered {0} error'.format(error))

            if not valid_json:
                return False

            # Expand the loop: one deep copy of every loop step per
            # iteration, with loop metadata and argument substitution.
            for iter_number, _ in enumerate(json_doc):
                for step_number, loop_step in enumerate(loop_steps):
                    copy_step = copy.deepcopy(loop_step)
                    copy_step.set("loop_id", "Loop:{}-Step:{}-Iter:{}".\
                            format(loop_count, step_number+1, iter_number+1))
                    # NOTE(review): iter_number is an int; ElementTree
                    # attribute values are normally strings — confirm
                    # downstream readers expect the int here.
                    copy_step.set("loop_iter_number", iter_number)
                    copy_step.set("loopid", loop_count)
                    arguments = copy_step.find('Arguments')
                    if arguments is not None and arguments is not False:
                        for argument in arguments.findall('argument'):
                            arg_value = argument.get('value')
                            arg_value = Utils.data_Utils.sub_from_loop_json(
                                arg_value, iter_number)
                            argument.set("value", arg_value)
                    step_list.append(copy_step)

    if root.tag == 'Project' or root.tag == 'TestSuite':
        step_list = []
        orig_step_list = steps.findall(sub_step_tag)
        for orig_step in orig_step_list:
            orig_step_path = orig_step.find('path').text
            if '*' not in orig_step_path:
                step_list.append(orig_step)
            # When the file path has asterisk(*), get the Warrior XML testcase/testsuite
            # files matching the given pattern
            else:
                orig_step_abspath = Utils.file_Utils.getAbsPath(
                    orig_step_path, os.path.dirname(filepath))
                print_info("Provided {0} path: '{1}' has asterisk(*) in "
                           "it. All the Warrior XML files matching "
                           "the given pattern will be executed.".format(
                               sub_step_tag, orig_step_abspath))
                # Get all the files matching the pattern and sort them by name
                all_files = sorted(glob.glob(orig_step_abspath))
                # Get XML files
                xml_files = [fl for fl in all_files if fl.endswith('.xml')]
                step_files = []
                # Get Warrior testcase/testsuite XML files
                for xml_file in xml_files:
                    root = Utils.xml_Utils.getRoot(xml_file)
                    if root.tag.upper() == sub_step_tag.upper():
                        step_files.append(xml_file)
                # Copy the XML object and set the filepath as path value for
                # all the files matching the pattern
                if step_files:
                    for step_file in step_files:
                        new_step = copy.deepcopy(orig_step)
                        new_step.find('path').text = step_file
                        step_list.append(new_step)
                        print_info("{0}: '{1}' added to the execution "
                                   "list ".format(sub_step_tag, step_file))
                else:
                    print_warning(
                        "Asterisk(*) pattern match failed for '{}' due "
                        "to at least one of the following reasons:\n"
                        "1. No files matched the given pattern\n"
                        "2. Invalid testcase path is given\n"
                        "3. No testcase XMLs are available\n"
                        "Given path will be used for the Warrior "
                        "execution.".format(orig_step_abspath))
                    step_list.append(orig_step)

        if randomize:
            random.shuffle(step_list)
    # iterate all steps to get the runmode and retry details
    for _, step in enumerate(step_list):
        runmode, value, _ = get_runmode_from_xmlfile(step)
        retry_type, _, _, retry_value, _ = get_retry_from_xmlfile(step)
        if runmode is not None and value > 0:
            go_next = len(step_list_with_rmt_retry) + value + 1
            step_list_with_rmt_retry = append_step_list(
                step_list_with_rmt_retry,
                step,
                value,
                go_next,
                mode="runmode",
                tag="value")
        if retry_type is not None and retry_value > 0:
            go_next = len(step_list_with_rmt_retry) + retry_value + 1
            # A step cannot carry both runmode and retry expansion; retry
            # wins, so drop the runmode element before appending.
            if runmode is not None:
                get_runmode = step.find('runmode')
                step.remove(get_runmode)
            step_list_with_rmt_retry = append_step_list(
                step_list_with_rmt_retry,
                step,
                retry_value,
                go_next,
                mode="retry",
                tag="count")
        if retry_type is None and runmode is None:
            step_list_with_rmt_retry.append(step)
    return step_list_with_rmt_retry
# Ejemplo n.º 23
# 0
    def execute_robot_wrapper(self, system_name, session_name=None):
        # Raw docstring: it contains the regex escape \$ which is an
        # invalid escape sequence in a normal string literal.
        r"""
        This keyword is to execute python scripts which internally calls robot scripts.
        :Arguments:
            1. system_name(string) - Name of the system/subsystem in the datafile
            2. session_name(string) - name of the session to the system
        :Returns:
            1. status(bool)= True/False
        :Datafile usage:
            Tags or attributes to be used in input datafile for the system/subsystem
            If both tag and attribute is provided the attribute will be used
            1. ip = IP address of the system where the python script will be executed
                Default value for ip type is ip, it can take any type of ip's
                to connect to (like ipv4, ipv6, dns etc)
                Users can provide tag/attribute for any ip_type under the
                system in the input datafile and specify the tag/attribute name
                as the value for ip_type argument, then the connection will be
                established using that value
            2. username = username for the session
            3. password = password for the session
            4. end_prompt = prompt expected when the command(python script) execution
                is successful, default value: .*(%|#|\$).
            5. remote = 'yes' when executed in remote system & 'no'(default)
                when executed in local system
            6. file_path = path of the python script to be executed
            7. output_dir = directory path used as outputdir for robot scripts
               available in the python script(in execution machine). All the
               Robot tests listed in the Python script should have same output directory.
            8. local_output_dir = path of the directory in the local system
                where the robot output files from remote system will be copied.
                If this tag is not available or left empty, results will be
                stored in 'home/<username>/robot_wrapper_opdir' directory.
            Note: Tags 1,2,3 & 8 are only required to copy the results from
             remote to local system  when remote(5) argument is set to 'yes'.
        """

        session_id = get_session_id(system_name, session_name)
        session_object = get_object_from_datarepository(session_id)

        credentials = get_credentials(self.datafile, system_name, [
            'ip', 'username', 'password', 'end_prompt', 'remote', 'file_path',
            'output_dir', 'local_output_dir'
        ])

        if not credentials['file_path'] or not credentials['output_dir']:
            pNote(
                "Please provide values for 'file_path & output_dir' "
                "tags in input data_file", 'warning')
            return False

        if credentials['end_prompt']:
            prompt = credentials['end_prompt']
        else:
            # Bug fix: raw string — "\$" in a normal literal is an invalid
            # escape sequence (SyntaxWarning/DeprecationWarning on modern
            # Python); the regex itself is unchanged.
            prompt = r".*(%|#|\$)"

        data_directory = os.path.dirname(self.datafile)
        abs_filepath = getAbsPath(credentials['file_path'], data_directory)
        abs_output_dir = getAbsPath(credentials['output_dir'], data_directory)

        # Timestamp taken before execution so only result files modified by
        # this run are collected below.
        current_time = time.time()
        if os.path.isfile(abs_filepath):
            command = "python " + abs_filepath
            status = session_object.send_command(".*", prompt, command)[0]
            if status is True:
                pNote("Robot_wrapper script: '{}' execution is successful".
                      format(abs_filepath))
            else:
                pNote(
                    "Robot_wrapper script: '{}' execution failed".format(
                        abs_filepath), 'warning')
        else:
            pNote(
                "Robot_wrapper script: '{}' does not exist".format(
                    abs_filepath), 'warning')
            status = False

        # When executed in remote machine
        if credentials['remote'] and credentials['remote'].upper() == "YES":

            if credentials['local_output_dir']:
                local_output_dir = getAbsPath(credentials['local_output_dir'],
                                              data_directory)
            else:
                local_output_dir = "~/robot_wrapper_opdir"
            get_file_from_remote_server(credentials['ip'],
                                        credentials['username'],
                                        credentials['password'],
                                        abs_output_dir, local_output_dir)
            abs_output_dir = local_output_dir + os.sep + os.path.basename(
                abs_output_dir)
        # Get the modified xml files in the output_dir
        modified_list = get_modified_files(abs_output_dir, current_time,
                                           ".xml")
        # Get the robot xml files from the modified list of files
        robot_xml_list = robot_wrapper_utils.get_robot_xml_files(modified_list)
        # Get results from robot xml files
        robot_test_results = robot_wrapper_utils.get_results_from_robot_xml(
            robot_xml_list)
        # Create junit for robot tests
        robot_wrapper_utils.create_case_junit(robot_test_results)

        return status
    def set_env_var(self,
                    var_key=None,
                    var_value=None,
                    filepath=None,
                    jsonkey="environmental_variables",
                    overwrite="yes"):
        """Create a temp environment variable, the value will only stay for the current Execution
        :Argument:
            var_key = key of the environment variable
            var_value = value of the environment variable
            filepath = Json file where Environmental variables are defined
            jsonkey = The key where all the ENV variable & values are defined
        With jsonkey arg, Users can call same file to set various ENV Variable
            overwrite = Yes-Will overwrite ENV variables set earlier via terminal or other means
                        No -Will not overwrite the ENV variables set earlier with the ones passed
                            through this keyword.

        :Returns:
            status(bool) = True if at least one variable was set (or kept,
                           with overwrite=no); False otherwise

        Variable File :
        Sample environmental_variable file is available under
        Warriorspace/Config_file/Samples/Set_ENV_Variable_Sample.json
        """
        wDesc = "Create a temp environment variable, the value will only stay for the \
        current Execution"

        Utils.testcase_Utils.pNote(
            "KEYWORD: set_env_var | Description: {0}".format(wDesc))
        overwrite = overwrite.upper()
        status = False
        if not any([var_key, var_value, filepath]):
            print_error(
                'Either Provide values to arguments \"var_key\" & \"var_value\" or to '
                'argument \"filepath\"')
            # Bug fix: return immediately — with all three arguments empty
            # there is nothing to do, and falling through used to emit a
            # second, misleading "overwrite can only accept yes or no" error.
            return status
        if overwrite == "NO" and os.getenv(var_key):
            # Variable already set elsewhere and user asked not to clobber it.
            print_info("Using ENV variable {0} set earlier with "
                       "value '{1}'".format(var_key, os.getenv(var_key)))
        elif var_key is not None and var_value is not None and overwrite in [
                "YES", "NO"
        ]:
            os.environ[var_key] = var_value
            if os.environ[var_key] == var_value:
                print_info("Set ENV variable {0} with value '{1}'".format(
                    var_key, var_value))
                status = True
        else:
            print_error(
                'The attribute overwrite can only accept values either yes or no'
            )
        if filepath is not None:
            # Load additional variables from the json file, resolved relative
            # to the currently executing testcase file.
            testcasefile_path = get_object_from_datarepository(
                'wt_testcase_filepath')
            try:
                filepath = getAbsPath(filepath,
                                      os.path.dirname(testcasefile_path))
                with open(filepath, "r") as json_handle:
                    get_json = json.load(json_handle)
                    if jsonkey in get_json:
                        env_dict = get_json[jsonkey]
                        for var_key, var_value in list(env_dict.items()):
                            if overwrite == "NO" and os.getenv(var_key):
                                print_info(
                                    'Using ENV variable {0} set earlier with value '
                                    '{1}'.format(var_key, os.getenv(var_key)))
                                status = True
                            elif overwrite in ["YES", "NO"]:
                                os.environ[var_key] = str(var_value)
                                # Bug fix: compare against the string form;
                                # os.environ values are always strings, so a
                                # non-str json value (e.g. int) never matched
                                # and status wrongly stayed False even though
                                # the variable was set.
                                if os.environ[var_key] == str(var_value):
                                    print_info(
                                        'Setting ENV variable {0} with value '
                                        '{1}'.format(var_key, var_value))
                                    status = True
                            else:
                                print_error(
                                    'The attribute overwrite can only accept values either '
                                    'yes or no')
                    else:
                        print_error(
                            'The {0} file is missing the key '
                            '\"environmental_variables\", please refer to '
                            'the Samples in Config_files'.format(filepath))
                        status = False
            except ValueError:
                print_error('The file {0} is not a valid json '
                            'file'.format(filepath))
                status = False
            except IOError:
                print_error('The file {0} does not exist'.format(filepath))
                status = False
            except Exception as error:
                print_error('Encountered {0} error'.format(error))
                status = False

        return status
    def store_in_repo(self,
                      datavar=None,
                      datavalue=None,
                      type='str',
                      filepath=None,
                      jsonkey="repo_variables",
                      bool_store_all=False):
        """Stores datavalue in datavar of datarepository
        :Argument:
            1. datavar = Key to be used to store datavalue in data_repository,
                         this could be dot separated to store in nested fashion
                            i.e., if var is k1.k2.k3 then the data value would be
                            stored as a value in datarepository[k1][k2][k3]
            2. datavalue = Value to be stored
            3. type = Type of datavalue(string/int/float)
            4. filepath = Json file where datarepository variables are defined.
                          It is to store multiple key,value pairs in datarepository.
            5. jsonkey = The key where all the REPO variables & values are
                         defined in the filepath
            6. bool_store_all = Set to True to store whole json file content to data repository.
                                Keys from the json file will be used as it is to store in repo
                                if this value is set to True.
                                default value is set to False

            Sample JSON file:
                 {
                     "repo_variables": {
                         "var1": {"type": "int", "value": "10"},
                         "var2.var3": {"value": "10"},
                         "var4.var5": "1"
                         },
                     "user_defined_tag":{
                         "var6" : {"type": "int", "value": "40"}
                         }
                 }
            All three formats in the above sample block are allowed. If 'type'
            is not provided, value will be converted as string by default.

        :Returns:
            status(bool) = True if the value(s) were stored, False otherwise
        """
        # NOTE: parameter name `type` shadows the builtin but is kept for
        # backward compatibility with existing keyword callers.
        wDesc = "Stores the values to the data_repository"
        Utils.testcase_Utils.pNote(
            "KEYWORD: store_in_repo | Description: {0}".format(wDesc))
        status = False
        pass_msg = "Value: {0} is stored in a Key: {1} of Warrior data_repository"

        if datavar is not None and datavalue is not None:
            # Coerce the single value per the requested type before storing.
            if type == 'int':
                datavalue = int(datavalue)
            elif type == 'float':
                datavalue = float(datavalue)
            dict_to_update = Utils.dict_Utils.get_dict_to_update(
                datavar, datavalue)
            update_datarepository(dict_to_update)
            print_info(pass_msg.format(datavalue, datavar))
            status = True

        if filepath is not None:
            # Load repo variables from the json file, resolved relative to
            # the currently executing testcase file.
            testcasefile_path = get_object_from_datarepository(
                'wt_testcase_filepath')
            try:
                filepath = getAbsPath(filepath,
                                      os.path.dirname(testcasefile_path))
                with open(filepath, "r") as json_handle:
                    json_doc = json.load(json_handle)
                #if bool_store_all is set to True, all content of given json file will be
                #stored in data repository
                if isinstance(bool_store_all, bool) and bool_store_all is True:
                    print_info(
                        "bool_store_all is set to True, all content of given"
                        " json file will be stored in data repository")
                    update_datarepository(json_doc)
                    print_info("{0} dictionary stored in Warrior data_repository".\
                        format(json_doc))
                    status = True
                elif not isinstance(bool_store_all, bool):
                    print_error("invalid value : {0} given for bool_store_all,"
                                "valid value: boolean True or False".format(
                                    bool_store_all))
                    status = False
                elif jsonkey in json_doc:
                    dict_to_update = {}
                    repo_dict = json_doc[jsonkey]
                    for var_key, var_value in list(repo_dict.items()):
                        # Each entry is either {"type": ..., "value": ...}
                        # or a bare value (stored as str).
                        if isinstance(var_value, dict):
                            if var_value.get('type') == 'int':
                                value = int(var_value['value'])
                            elif var_value.get('type') == 'float':
                                value = float(var_value['value'])
                            else:
                                value = str(var_value['value'])
                        else:
                            value = str(var_value)
                        build_dict = Utils.dict_Utils.get_dict_to_update(
                            var_key, value)
                        Utils.dict_Utils.verify_key_already_exists_and_update\
                           (orig_dict=dict_to_update, new_dict=build_dict)
                    update_datarepository(dict_to_update)
                    print_info("{0} dictionary stored in Warrior data_repository".\
                        format(dict_to_update))
                    status = True
                else:
                    print_error('The {0} file is missing the key '
                                '\"repo_variables\", please refer to '
                                'the Samples in Config_files'.format(filepath))
                    # Bug fix: this is an error path, so report failure.
                    # Previously status was set to True here, masking the
                    # missing-key error (sibling set_env_var sets False on
                    # the same condition).
                    status = False
            except ValueError:
                print_error('The file {0} is not a valid json '
                            'file'.format(filepath))
            except IOError:
                print_error('The file {0} does not exist'.format(filepath))
            except Exception as error:
                print_error('Encountered {0} error'.format(error))

        # Bug fix: check `datavar`, not `type` — `type` defaults to 'str'
        # and could never be None, so this guidance error never fired when
        # the keyword was called without datavar/datavalue/filepath.
        if (datavar is None or datavalue is None) and filepath is None:
            print_error('Either Provide values to arguments \"datavar\" & '
                        '\"datavalue\" or to argument \"filepath\"')

        return status
# --- scraped-sample separator (kept as comment; was a syntax error as bare text) ---
# Ejemplo n.º 26
# 0
    def __check_input_datafile(cls, filepath, testname, check_files_dict):
        """ Verify that the input data file exists in the path provided.
            If path not provided verify the default data file

        Arguments:
              1. filepath: xml file that is parsed for its
                 'Details/InputDataFile' tag
              2. testname: 'Testcase' or 'Testsuite' - which kind of
                 datafile is being checked
              3. check_files_dict: a dict element to check the status of files
                 whether it has been verified already or not

        Return:
              1. result(list of bool): True entries when the datafile(s)
                 exist, False entries otherwise (empty when no check applied)
              2. check_files_dict: a dict element to check the status of files
                 whether it has been verified already or not
        """

        result = []

        # getChildTextbyParentTag returns the tag text, or False/None when
        # the tag is missing or empty
        input_data_file = xml_Utils.getChildTextbyParentTag(
            filepath, 'Details', 'InputDataFile')
        if input_data_file is not False and input_data_file is not None:
            # bugfix: compare strings with '==', not 'is' - identity on str
            # literals only works by accident of CPython interning
            # (SyntaxWarning since Python 3.8)
            if testname == 'Testsuite':
                check_files_dict['check_datafile'] = True
            input_data_file = str(input_data_file).strip()
            if str(input_data_file).upper() == 'NO_DATA':
                print_debug('No_Data option selected for this testcase')
                result.append(True)

            elif 'NO_DATA' not in str(input_data_file).upper():
                # resolve the datafile path relative to the xml file's dir
                data_file_path = file_Utils.getAbsPath(
                    input_data_file, os.path.dirname(filepath))
                print_info("{0} input data_file_path: {1}".format(
                    testname, data_file_path))
                if os.path.exists(data_file_path):
                    print_info("{0} Input datafile is present "
                               "in location {1}".format(testname, data_file_path))
                    result.append(True)
                else:
                    print_error("{0} Input datafile is NOT "
                                "present in location {1}".format(testname, data_file_path))
                    result.append(False)

        else:
            # 'InputDataFile' tag absent (None) or empty (False); the
            # original elif re-tested the exact negation of the if above
            if testname == 'Testcase':
                print_debug("InputDataFile is not provided,"
                            "checking if default InputDataFile exists....")
                default_datafilepath = execution_files_class.get_default_xml_datafile(
                    filepath)
                print_debug(
                    "default_datafile_path: {0}".format(default_datafilepath))
                if os.path.exists(default_datafilepath):
                    print_debug(
                        "Default input datafile for the Testcase is available")
                    result.append(True)
                else:
                    print_error(
                        "Default input datafile for the Testcase is NOT available"
                    )
                    result.append(False)
            else:
                # Testsuite without a datafile: mark as not verified
                check_files_dict['check_datafile'] = False

        return result, check_files_dict
# --- scraped-sample separator (kept as comment; was a syntax error as bare text) ---
# Ejemplo n.º 27
# 0
    def check_opt_values_from_datafile(
            self,
            langs=['Sanskrit', 'Tamil'],
            strvar="I am a default variable",
            states="wtag=states",
            system_name="sys_wtag",
            currencys={'USA': 'USD'},
            ramspace=False,
            configfile="../../config_files/check_file_type",
            intvar=496):
        """Verify the datatype of the value read from the datafile using either
        the tag or wtag feature

        NOTE: the mutable defaults (list/dict) are kept deliberately - they
        are never mutated here and the framework presumably substitutes them
        from the datafile (tag/wtag), so the shared-default pitfall does not
        apply; changing them to None could break that resolution - TODO
        confirm against the argument-resolution code.

        :Argument:
            1. system_name = system name in the datafile
            2. strvar = string variable
            3. langs = list variable (should get from data file using wtag)
            4. states = tuple variable
            5. currencys = dict variable
            6. ramspace = boolean variable
            7. configfile = file variable
            8. intvar = int variable
        :Return:
            1. status(bool): True if every value has the expected datatype
               and configfile points at an existing file, else False
        """
        def check_type(var, varname, datatype):
            """Return True if var is exactly of datatype, else log an error
            via pNote and return False."""
            vartype = type(var)
            status = True
            if vartype is not datatype:
                pNote(
                    '{} is expected to be {} type, but found to be of '
                    '{} type'.format(varname, datatype, vartype), "error")
                status = False
            return status

        status = True
        datafile = Utils.config_Utils.datafile
        # directory of the currently executing testcase, used to resolve
        # relative file arguments
        tc_filepath = os.path.dirname(
            data_Utils.get_object_from_datarepository('wt_testcase_filepath'))
        # type-check each sample argument; any mismatch flips status to False
        status = check_type(strvar, "strvar", str) and status
        status = check_type(langs, "langs", list) and status
        status = check_type(states, "states", tuple) and status
        status = check_type(currencys, "currencys", dict) and status
        status = check_type(ramspace, "ramspace", bool) and status
        file_err = '{} is not a file, please check'
        try:
            # resolve configfile relative to the testcase dir, then verify
            # it actually is a file
            if not os.path.isabs(configfile):
                configfile = file_Utils.getAbsPath(configfile, tc_filepath)
            if not os.path.isfile(configfile):
                pNote(file_err.format(configfile), "error")
                # bugfix: a missing file is a failed check - previously the
                # error was only logged and status stayed True, unlike every
                # other failed check in this method
                status = False
        except AttributeError:
            # configfile was not str-like (e.g. startswith/isabs blew up)
            pNote('configfile and anotherfile are expected to be files',
                  "error")
            pNote('type of configfile is {}'.format(type(configfile)), "error")
            status = False
        # isinstance is the idiomatic type check (was: type(intvar) is str)
        if isinstance(intvar, str) and intvar.startswith('tag'):
            # unresolved 'tag...' placeholder: resolve it from the datafile
            intvar = data_Utils.resolve_argument_value_to_get_tag_value(
                datafile, system_name, intvar)
        else:
            status = check_type(intvar, "intvar", int) and status
        return status
    def build_server(self, datafile, system_name):
        """
            Take in a system and read all its routes
            Load the routes into Bottle server object
            Start a thread with the bottle server

            return the bottle server adapter and server thread

            Arguments:
                1. datafile: path to the datafile describing the system
                2. system_name: name of the system whose 'mapping_file'
                   holds the route definitions

            Return:
                (True, {"server": ServerHandler, "server_thread": Thread})
                when the daemon server thread is alive ~2s after start,
                else (False, {})
        """
        app = Bottle()
        # Get system and routes
        # NOTE(review): assumes system_data always contains a
        # 'mapping_file' key - KeyError otherwise; confirm against callers
        system_data = data_Utils.get_credentials(datafile, system_name)
        self.datafile = datafile

        route_file = system_data['mapping_file']
        if route_file:
            # resolve the mapping file relative to the datafile's directory
            route_file = getAbsPath(route_file, getDirName(datafile))
        # Loop through each route
        for route in data_Utils.get_all_system_or_subsystem(route_file):
            # Bottle route paths must begin with '/'
            route_name = route.get('name')
            if route_name[0] != '/':
                route_name = '/' + route_name

            # Group request condition with the same method together
            # (route_methods maps HTTP method name -> list of xml elements)
            route_methods = {}
            for request in route:
                request_method = request.find('request_method').text.upper()
                if request_method not in route_methods:
                    route_methods[request_method] = [request]
                else:
                    route_methods[request_method].append(request)

            # Build route with the grouped conditions
            for method_type, same_type_methods in list(route_methods.items()):
                # A route can have general response and conditional response
                specific_res = []
                general_res = {}

                for method in same_type_methods:
                    dict_of_info = {}
                    method_req = {}
                    method_res = {}

                    # Get all info from the condition; repeated tags
                    # accumulate their texts into a list
                    for info in iter(method):
                        if info.tag in dict_of_info:
                            dict_of_info[info.tag].append(info.text)
                        else:
                            dict_of_info[info.tag] = [info.text]

                    # Extract request/response related info
                    # NOTE(review): method_req is reassigned (not updated)
                    # per matching key, so only the LAST request_verify key
                    # survives - presumably one is expected; confirm
                    for key, value in list(dict_of_info.items()):
                        if key in request_verify_list:
                            method_req = {key: value}
                        elif key in response_list:
                            method_res[key] = value

                    if any([
                            key in request_verify_list
                            for key in list(dict_of_info.keys())
                    ]):
                        # this condition has request/response pair.
                        # method_combine aliases method_req, so the update
                        # below merges method_res INTO method_req before it
                        # is appended - the append relies on this aliasing
                        method_combine = method_req
                        method_combine.update(method_res)
                        specific_res.append(method_req)
                        # this ensure when all verification fail and no general response given
                        # there will be some responses
                        if any([
                                key in on_fail_response_list
                                for key in list(dict_of_info.keys())
                        ]):
                            general_res.update(method_res)
                    else:
                        # this condition only has general response
                        general_res.update(method_res)

                # register the callable built for this (path, method) pair
                app.route(
                    route_name, method_type,
                    self.build_route(route_name, method_type, specific_res,
                                     general_res))

        # Build a class to hold the server so it can be closed easily
        port = 5000 if "port" not in system_data else int(system_data["port"])
        server = ServerHandler(host="0.0.0.0", port=port)
        server_thread = threading.Thread(target=run,
                                         kwargs={
                                             "app": app,
                                             "server": server,
                                             "debug": True
                                         })
        # daemon thread: dies with the main process, no explicit join needed
        server_thread.daemon = True
        server_thread.start()
        # give the server a moment to bind before checking liveness
        sleep(2)

        if server_thread.is_alive():
            return True, {"server": server, "server_thread": server_thread}
        else:
            return False, {}