def verify_xml(self, incoming_xml, respond_obj, file=False):
    """
    Verify the incoming_xml data with either
        a. whole xml file
        b. tag text pairs
    :param:
        incoming_xml: an xml string
        respond_obj: contains the verification detail from datafile
        file: indicate if comparing whole file or just pairs
    :return:
        True if whole file match/all pairs match
        False if not match
    """
    if file:
        # Whole-file comparison: each expected file is compared in turn;
        # the status of the LAST comparison wins (same as original behavior).
        result = False
        for expected_file in respond_obj["request_verify_data"]:
            expected_abspath = getAbsPath(expected_file,
                                          getDirName(self.datafile))
            result, _, _, _ = compare_xml(incoming_xml, expected_abspath,
                                          output_file=False,
                                          sorted_json=False,
                                          remove_namespaces=True)
        return result

    # Pair comparison: parse the incoming payload and check each
    # "xpath...,value..." pair from the datafile against it.
    parsed_root = ET.fromstring(incoming_xml,
                                parser=ET.XMLParser(encoding="utf-8"))
    for element_pair in respond_obj["request_verify"]:
        pair_parts = element_pair.split(",")
        # [4:] / [6:] strip the fixed-width prefixes of each pair part
        # (presumably "tag=..." and "value=..." — format set by the datafile).
        xpath = pair_parts[0][4:]
        expected_text = pair_parts[1][6:]
        matched_elem = getChildElementWithSpecificXpath(parsed_root, xpath)
        if matched_elem is None or expected_text != matched_elem.text:
            return False
    return True
def get_result_files(self):
    """Get execution results dir and files.

    Resolution order for the results directory:
        1. self.res_startdir, when provided (a timestamped dir is created in it).
        2. The 'Resultsdir' text under 'Details' in self.filepath.
        3. The default 'Resultsdir' from Tools/w_settings.xml.
        4. A default execution dir created by self.create_def_exec_dir().

    :Returns:
        1. resultfile - result file path with .xml extension
        2. resultsdir - directory containing resultfile
        3. results_execdir - execution directory for results
    """
    if self.res_startdir is not None:
        # User supplied an explicit start dir: create a timestamped
        # subdirectory in it and place the results file there.
        results_execdir = file_Utils.createDir_addtimestamp(
            self.res_startdir, self.nameonly)
        rfile = self.get_exec_file_by_type("Results", results_execdir)
    elif self.res_startdir is None:
        # Look up Resultsdir in the testcase/suite/project xml file.
        # NOTE(review): getChildTextbyParentTag appears to return False
        # when the tag is absent — hence the None/False double checks below.
        results_location = xml_Utils.getChildTextbyParentTag(
            self.filepath, 'Details', 'Resultsdir')
        #get default results directory
        default_xml = Tools.__path__[0] + os.sep + 'w_settings.xml'
        default_resultsdir = get_credentials(default_xml, 'def_dir',
                                             ['Resultsdir'], 'Setting')
        #use the default directory if user didn't define it in test case/test suite/project
        if results_location is None or results_location is False:
            if default_resultsdir['Resultsdir'] is not None:
                results_location = default_resultsdir['Resultsdir']
        if results_location is None or results_location is False\
                or str(results_location).strip() == "":
            # Neither user-provided nor default location available:
            # fall back to the framework's default execution directory.
            results_execdir = self.create_def_exec_dir()  #proj_exec_dir
            rfile = self.get_exec_file_by_type("Results", results_execdir)
        elif results_location is not None and results_location is not False:
            # Resolve a (possibly relative) location against the
            # directory of the testcase/suite/project file.
            results_location_rel = str(results_location).strip()
            results_location = file_Utils.getAbsPath(
                results_location_rel, os.path.dirname(self.filepath))
            rfile, results_execdir = self.checkdir_create_file(
                results_location, "Results")
    # print "printing results_execdir: ", results_execdir
    resultfile = file_Utils.getNewExtension(rfile, "xml")
    resultsdir = os.path.dirname(resultfile)
    return resultfile, resultsdir, results_execdir
def __check_input_datafile(cls, filepath, testname, check_files_dict):
    """ Verify that the input data file exists in the path provided.
    If path not provided verify the default data file

    Arguments:
        1. filepath: filepath will be parsed as input for checking Input data
        2. testname: to mention whether it is Testcase/Testsuite datafile
        3. check_files_dict: a dict element to check the status of files
           whether it has been verified already or not

    Return:
        1. result(list of bool): if the Datafiles exist, contains True;
           else False
        2. check_files_dict: a dict element to check the status of files
           whether it has been verified already or not
    """
    result = []
    input_data_file = xml_Utils.getChildTextbyParentTag(
        filepath, 'Details', 'InputDataFile')
    if input_data_file is not False and input_data_file is not None:
        # BUG FIX: was `testname is 'Testsuite'` — identity comparison with
        # a string literal only works by CPython interning accident;
        # use equality instead.
        if testname == 'Testsuite':
            check_files_dict['check_datafile'] = True
        input_data_file = str(input_data_file).strip()
        if str(input_data_file).upper() == 'NO_DATA':
            # Explicit opt-out of a datafile for this testcase.
            print_info('No_Data option selected for this testcase')
            result.append(True)
        elif 'NO_DATA' not in str(input_data_file).upper():
            # Resolve the (possibly relative) datafile path against the
            # directory of the testcase/suite file and check existence.
            data_file_path = file_Utils.getAbsPath(
                input_data_file, os.path.dirname(filepath))
            print_info("{0} input data_file_path: {1}".format(
                testname, data_file_path))
            if os.path.exists(data_file_path):
                print_info("{0} Input datafile is present "\
                           "in location {1}".format(testname, data_file_path))
                result.append(True)
            else:
                print_error("{0} Input datafile is NOT "\
                            "present in location {1}".format(testname,
                                                             data_file_path))
                result.append(False)
    elif input_data_file is None or input_data_file is False:
        # No datafile given: for a Testcase, fall back to the default
        # datafile convention; Testsuites just record that nothing
        # needs checking.
        # BUG FIX: was `testname is 'Testcase'` — same identity-vs-equality
        # bug as above.
        if testname == 'Testcase':
            print_info("InputDataFile is not provided,"\
                       "checking if default InputDataFile exists....")
            default_datafilepath = execution_files_class.get_default_xml_datafile(\
                filepath)
            print_debug(
                "default_datafile_path: {0}".format(default_datafilepath))
            if os.path.exists(default_datafilepath):
                print_info(
                    "Default input datafile for the Testcase is available")
                result.append(True)
            else:
                print_error(
                    "Default input datafile for the Testcase is NOT available"
                )
                result.append(False)
        else:
            check_files_dict['check_datafile'] = False
    return result, check_files_dict
def set_env_var(self, var_key=None, var_value=None, filepath=None,
                jsonkey="environmental_variables", overwrite="yes"):
    """Create a temp environment variable, the value will only stay for
    the current Execution

    :Argument:
        var_key = key of the environment variable
        var_value = value of the environment variable
        filepath = Json file where Environmental variables are defined
        jsonkey = The key where all the ENV variable & values are defined
            With jsonkey arg, Users can call same file to set various
            ENV Variable
        overwrite = Yes-Will overwrite ENV variables set earlier via
            terminal or other means
            No -Will not overwrite the ENV variables set earlier with the
            ones passed through this keyword.
    Variable File :
        Sample environmental_variable file is available under
        Warriorspace/Config_file/Samples/Set_ENV_Variable_Sample.json

    :Returns:
        status(bool) - True if variable(s) were set, False otherwise
    """
    overwrite = overwrite.upper()
    status = False
    if not any([var_key, var_value, filepath]):
        print_error(
            'Either Provide values to arguments \"var_key\" & \"var_value\" or to '
            'argument \"filepath\"')
    # BUG FIX: guard var_key before os.getenv — os.getenv(None) raises
    # TypeError, which crashed the filepath-only + overwrite="no" path.
    if overwrite == "NO" and var_key is not None and os.getenv(var_key):
        print_info("Using ENV variable {0} set earlier with "
                   "value '{1}'".format(var_key, os.getenv(var_key)))
    elif var_key is not None and var_value is not None and overwrite in [
            "YES", "NO"]:
        os.environ[var_key] = var_value
        if os.environ[var_key] == var_value:
            print_info("Set ENV variable {0} with value '{1}'".format(
                var_key, var_value))
            status = True
    else:
        print_error(
            'The attribute overwrite can only accept values either yes or no'
        )
    if filepath is not None:
        testcasefile_path = get_object_from_datarepository(
            'wt_testcase_filepath')
        try:
            filepath = getAbsPath(filepath,
                                  os.path.dirname(testcasefile_path))
            with open(filepath, "r") as json_handle:
                get_json = json.load(json_handle)
                if jsonkey in get_json:
                    env_dict = get_json[jsonkey]
                    for var_key, var_value in env_dict.items():
                        if overwrite == "NO" and os.getenv(var_key):
                            print_info(
                                'Using ENV variable {0} set earlier with value '
                                '{1}'.format(var_key, os.getenv(var_key)))
                            status = True
                        elif overwrite in ["YES", "NO"]:
                            # Environment values must be strings.
                            os.environ[var_key] = str(var_value)
                            # BUG FIX: compare against the stringified value
                            # that was actually stored — comparing against a
                            # non-string JSON value (int/float/bool) always
                            # failed and left status False.
                            if os.environ[var_key] == str(var_value):
                                print_info(
                                    'Setting ENV variable {0} with value '
                                    '{1}'.format(var_key, var_value))
                                status = True
                        else:
                            print_error(
                                'The attribute overwrite can only accept values either '
                                'yes or no')
                else:
                    print_error(
                        'The {0} file is missing the key '
                        '\"environmental_variables\", please refer to '
                        'the Samples in Config_files'.format(filepath))
                    status = False
        except ValueError:
            print_error('The file {0} is not a valid json '
                        'file'.format(filepath))
            status = False
        except IOError:
            print_error('The file {0} does not exist'.format(filepath))
            status = False
        except Exception as error:
            print_error('Encountered {0} error'.format(error))
            status = False
    return status
def store_in_repo(self, datavar=None, datavalue=None, type='str',
                  filepath=None, jsonkey="repo_variables",
                  bool_store_all=False):
    """Stores datavalue in datavar of datarepository

    :Argument:
        1. datavar = Key to be used to store datavalue in data_repository,
           this could be dot separated to store in nested fashion
           i.e., if var is k1.k2.k3 then the data value would be stored as
           a value in datarepository[k1][k2][k3]
        2. datavalue = Value to be stored
        3. type = Type of datavalue(string/int/float)
        4. filepath = Json file where datarepository variables are defined.
           It is to store multiple key,value pairs in datarepository.
        5. jsonkey = The key where all the REPO variables & values are
           defined in the filepath
        6. bool_store_all = Set to True to store whole json file content to
           data repository. keys from the json file will be used as it is
           to store in repo if this value is set to True.
           default value is set to False.

    Sample JSON file:
        {
        "repo_variables": {
            "var1": {"type": "int", "value": "10"},
            "var2.var3": {"value": "10"},
            "var4.var5": "1"
            },
        "user_defined_tag":{
            "var6" : {"type": "int", "value": "40"}
            }
        }
    All three formats in the above sample block are allowed. If 'type' is
    not provided, value will be converted as string by default.
    """
    status = False
    pass_msg = "Value: {0} is stored in a Key: {1} of Warrior data_repository"
    if datavar is not None and datavalue is not None:
        # Convert the single value per the requested type ('str' needs
        # no conversion since keyword arguments arrive as strings).
        if type == 'int':
            datavalue = int(datavalue)
        elif type == 'float':
            datavalue = float(datavalue)
        dict_to_update = Utils.dict_Utils.get_dict_to_update(datavar, datavalue)
        update_datarepository(dict_to_update)
        print_info(pass_msg.format(datavalue, datavar))
        status = True
    if filepath is not None:
        testcasefile_path = get_object_from_datarepository('wt_testcase_filepath')
        try:
            filepath = getAbsPath(filepath, os.path.dirname(testcasefile_path))
            with open(filepath, "r") as json_handle:
                json_doc = json.load(json_handle)
                #if bool_store_all is set to True, all content of given json file will be
                #stored in data repository
                if isinstance(bool_store_all, bool) and bool_store_all is True:
                    print_info("bool_store_all is set to True, all content of given"
                               " json file will be stored in data repository")
                    update_datarepository(json_doc)
                    print_info("{0} dictionary stored in Warrior data_repository".
                               format(json_doc))
                    status = True
                elif not isinstance(bool_store_all, bool):
                    print_error("invalid value : {0} given for bool_store_all,"
                                "valid value: boolean True or False".format(bool_store_all))
                    status = False
                elif jsonkey in json_doc:
                    # Merge all key/value pairs under jsonkey into one nested
                    # dict before a single repository update.
                    dict_to_update = {}
                    repo_dict = json_doc[jsonkey]
                    for var_key, var_value in repo_dict.items():
                        if isinstance(var_value, dict):
                            if var_value.get('type') == 'int':
                                value = int(var_value['value'])
                            elif var_value.get('type') == 'float':
                                value = float(var_value['value'])
                            else:
                                value = str(var_value['value'])
                        else:
                            value = str(var_value)
                        build_dict = Utils.dict_Utils.get_dict_to_update(var_key, value)
                        Utils.dict_Utils.verify_key_already_exists_and_update\
                            (orig_dict=dict_to_update, new_dict=build_dict)
                    update_datarepository(dict_to_update)
                    print_info("{0} dictionary stored in Warrior data_repository".\
                               format(dict_to_update))
                    status = True
                else:
                    print_error('The {0} file is missing the key '
                                '\"repo_variables\", please refer to '
                                'the Samples in Config_files'.format(filepath))
                    # BUG FIX: was `status = True` — a missing jsonkey is an
                    # error and must report failure (consistent with
                    # set_env_var's handling of the same situation).
                    status = False
        except ValueError:
            print_error('The file {0} is not a valid json '
                        'file'.format(filepath))
        except IOError:
            print_error('The file {0} does not exist'.format(filepath))
        except Exception as error:
            print_error('Encountered {0} error'.format(error))
    # BUG FIX: was `type is None` — `type` defaults to 'str' and is never
    # None; the message refers to datavar, so test datavar instead.
    if (datavar is None or datavalue is None) and filepath is None:
        print_error('Either Provide values to arguments \"datavar\" & '
                    '\"datavalue\" or to argument \"filepath\"')
    return status
def check_opt_values_from_datafile(
        self, langs=None, strvar="I am a default variable",
        states="wtag=states", system_name="sys_wtag",
        currencys=None, ramspace=False,
        configfile="../../config_files/check_file_type", intvar=496):
    """Verify the datatype of the value read from the datafile using
    either the tag or wtag feature

    :Argument:
        1. system_name = system name in the datafile
        2. strvar = string variable
        3. langs = list variable (should get from data file using wtag)
        4. states = tuple variable
        5. currencys = dict variable
        6. ramspace = boolean variable
        7. configfile = file variable
        8. intvar = int variable

    :Returns:
        status(bool) - True if all values have the expected datatype
    """
    # BUG FIX: list/dict literals as default arguments are shared across
    # calls (mutable-default pitfall); use None sentinels and materialize
    # the same defaults per call — behavior for callers is unchanged.
    if langs is None:
        langs = ['Sanskrit', 'Tamil']
    if currencys is None:
        currencys = {'USA': 'USD'}

    def check_type(var, varname, datatype):
        """check that vars are of correct datatype """
        # Exact type check (not isinstance) is deliberate so that e.g.
        # bool values are not accepted where int is expected.
        vartype = type(var)
        status = True
        if vartype is not datatype:
            pNote(
                '{} is expected to be {} type, but found to be of '
                '{} type'.format(varname, datatype, vartype), "error")
            status = False
        return status

    status = True
    datafile = Utils.config_Utils.datafile
    tc_filepath = os.path.dirname(
        data_Utils.get_object_from_datarepository('wt_testcase_filepath'))
    # this block checks if strvar is string type
    status = check_type(strvar, "strvar", str) and status
    # this block checks if langs is list type
    status = check_type(langs, "langs", list) and status
    # this block checks if states is tuple type
    status = check_type(states, "states", tuple) and status
    # this block checks if currencys is dict type
    status = check_type(currencys, "currencys", dict) and status
    # this block checks if ramspace is bool type
    status = check_type(ramspace, "ramspace", bool) and status
    file_err = '{} is not a file, please check'
    try:
        # this checks if configfile and anotherfile are valid files
        # by getting the absolute path of the file
        if not os.path.isabs(configfile):
            configfile = file_Utils.getAbsPath(configfile, tc_filepath)
        if not os.path.isfile(configfile):
            pNote(file_err.format(configfile), "error")
    except AttributeError:
        # Raised when configfile is not a string (e.g. os.path.isabs on a
        # non-path object).
        pNote('configfile and anotherfile are expected to be files', "error")
        pNote('type of configfile is {}'.format(type(configfile)), "error")
        status = False
    # A 'tag...' string means the value must be resolved from the datafile
    # instead of type-checked directly.
    if type(intvar) is str and intvar.startswith('tag'):
        intvar = data_Utils.resolve_argument_value_to_get_tag_value(
            datafile, system_name, intvar)
    else:
        status = check_type(intvar, "intvar", int) and status
    return status
def store_in_repo(self, datavar=None, datavalue=None, type='str',
                  filepath=None, jsonkey="repo_variables"):
    """Stores datavalue in datavar of datarepository

    :Argument:
        1. datavar = Key to be used to store datavalue in data_repository,
           this could be dot separated to store in nested fashion
           i.e., if var is k1.k2.k3 then the data value would be stored as
           a value in datarepository[k1][k2][k3]
        2. datavalue = Value to be stored
        3. type = Type of datavalue(string/int/float)
        4. filepath = Json file where datarepository variables are defined.
           It is to store multiple key,value pairs in datarepository.
        5. jsonkey = The key where all the REPO variables & values are
           defined in the filepath

    Sample JSON file:
        {
        "repo_variables": {
            "var1": {"type": "int", "value": "10"},
            "var2.var3": {"value": "10"},
            "var4.var5": "1"
            },
        "user_defined_tag":{
            "var6" : {"type": "int", "value": "40"}
            }
        }
    All three formats in the above sample block are allowed. If 'type' is
    not provided, value will be converted as string by default.
    """
    def get_dict_to_update(var, val):
        """
        The function creates a dictionary with Variable and value.
        If Variable has "." separated keys then the value is updated at
        appropriate level of the nested dictionary.
        :param var: Dictionary Key or Key separated with "." for nested
            dict keys.
        :param val: Value for the Key.
        :return: Dictionary
        """
        dic = {}
        if '.' in var:
            # Split only on the first dot and recurse for deeper levels.
            [key, value] = var.split('.', 1)
            dic[key] = get_dict_to_update(value, val)
        else:
            dic[var] = val
        return dic

    status = False
    pass_msg = "Value: {0} is stored in a Key: {1} of Warrior data_repository"
    if datavar is not None and datavalue is not None:
        if type == 'int':
            datavalue = int(datavalue)
        elif type == 'float':
            datavalue = float(datavalue)
        dict_to_update = get_dict_to_update(datavar, datavalue)
        update_datarepository(dict_to_update)
        print_info(pass_msg.format(datavalue, datavar))
        status = True
    if filepath is not None:
        testcasefile_path = get_object_from_datarepository(
            'wt_testcase_filepath')
        try:
            filepath = getAbsPath(filepath,
                                  os.path.dirname(testcasefile_path))
            with open(filepath, "r") as json_handle:
                json_doc = json.load(json_handle)
                if jsonkey in json_doc:
                    repo_dict = json_doc[jsonkey]
                    for var_key, var_value in list(repo_dict.items()):
                        if isinstance(var_value, dict):
                            if var_value.get('type') == 'int':
                                value = int(var_value['value'])
                            elif var_value.get('type') == 'float':
                                value = float(var_value['value'])
                            else:
                                value = str(var_value['value'])
                        else:
                            value = str(var_value)
                        dict_to_update = get_dict_to_update(var_key, value)
                        update_datarepository(dict_to_update)
                        print_info(pass_msg.format(value, var_key))
                    # Success is reported only after all pairs are stored.
                    status = True
                else:
                    print_error(
                        'The {0} file is missing the key '
                        '\"repo_variables\", please refer to '
                        'the Samples in Config_files'.format(filepath))
                    # BUG FIX: was `status = True` after the error — a
                    # missing jsonkey must report failure (consistent with
                    # set_env_var's handling of the same situation).
                    status = False
        except ValueError:
            print_error('The file {0} is not a valid json '
                        'file'.format(filepath))
        except IOError:
            print_error('The file {0} does not exist'.format(filepath))
        except Exception as error:
            print_error('Encountered {0} error'.format(error))
    # BUG FIX: was `type is None` — `type` defaults to 'str' and is never
    # None; the message refers to datavar, so test datavar instead.
    if (datavar is None or datavalue is None) and filepath is None:
        print_error('Either Provide values to arguments \"datavar\" & '
                    '\"datavalue\" or to argument \"filepath\"')
    return status
def build_server(self, datafile, system_name):
    """
    Take in a system and read all its routes
    Load the routes into Bottle server object
    Start a thread with the bottle server
    return the bottle server adapter and server thread

    :Returns:
        (True, {"server": ..., "server_thread": ...}) if the server thread
        is alive after startup, else (False, {})
    """
    app = Bottle()
    # Get system and routes
    system_data = data_Utils.get_credentials(datafile, system_name)
    self.datafile = datafile
    route_file = system_data['mapping_file']
    if route_file:
        route_file = getAbsPath(route_file, getDirName(datafile))
    # Loop through each route
    for route in data_Utils.get_all_system_or_subsystem(route_file):
        route_name = route.get('name')
        # Bottle routes must start with a slash.
        if route_name[0] != '/':
            route_name = '/' + route_name
        # Group request condition with the same method together
        route_methods = {}
        for request in route:
            request_method = request.find('request_method').text.upper()
            if request_method not in route_methods:
                route_methods[request_method] = [request]
            else:
                route_methods[request_method].append(request)
        # Build route with the grouped conditions
        for method_type, same_type_methods in route_methods.items():
            # A route can have general response and conditional response
            specific_res = []
            general_res = {}
            for method in same_type_methods:
                dict_of_info = {}
                method_req = {}
                method_res = {}
                # Get all info from the condition; values are collected as
                # lists since a tag may repeat within one condition.
                for info in iter(method):
                    if info.tag in dict_of_info:
                        dict_of_info[info.tag].append(info.text)
                    else:
                        dict_of_info[info.tag] = [info.text]
                # Extract request/response related info
                for key, value in dict_of_info.items():
                    if key in request_verify_list:
                        method_req = {key: value}
                    elif key in response_list:
                        method_res[key] = value
                if any([
                        key in request_verify_list
                        for key in dict_of_info.keys()
                ]):
                    # this condition has request/response pair
                    # NOTE(review): method_combine aliases method_req, so the
                    # update() below also mutates the dict appended to
                    # specific_res — presumably intentional; confirm.
                    method_combine = method_req
                    method_combine.update(method_res)
                    specific_res.append(method_req)
                    # this ensure when all verification fail and no general response given
                    # there will be some responses
                    if any([
                            key in on_fail_response_list
                            for key in dict_of_info.keys()
                    ]):
                        general_res.update(method_res)
                else:
                    # this condition only has general response
                    general_res.update(method_res)
            # Register one handler per (route, HTTP method) pair.
            app.route(
                route_name, method_type,
                self.build_route(route_name, method_type, specific_res,
                                 general_res))
    # Build a class to hold the server so it can be closed easily
    port = 5000 if "port" not in system_data else int(system_data["port"])
    server = ServerHandler(host="0.0.0.0", port=port)
    # Daemon thread so the mock server dies with the main process.
    server_thread = threading.Thread(target=run,
                                     kwargs={
                                         "app": app,
                                         "server": server,
                                         "debug": True
                                     })
    server_thread.daemon = True
    server_thread.start()
    # Give the server a moment to start before checking it is alive.
    sleep(2)
    if server_thread.is_alive():
        return True, {"server": server, "server_thread": server_thread}
    else:
        return False, {}
def execute_robot_wrapper(self, system_name, session_name=None):
    """
    This keyword is to execute python scripts which internally
    calls robot scripts.
    :Arguments:
        1. system_name(string) - Name of the system/subsystem in the datafile
        2. session_name(string) - name of the session to the system
    :Returns:
        1. status(bool)= True/False
    :Datafile usage:
        Tags or attributes to be used in input datafile for the
        system/subsystem. If both tag and attribute is provided the
        attribute will be used.
        1. ip = IP address of the system where the python script will
           be executed. Default value for ip type is ip, it can take any
           type of ip's to connect to (like ipv4, ipv6, dns etc).
           Users can provide tag/attribute for any ip_type under the
           system in the input datafile and specify the tag/attribute
           name as the value for ip_type argument, then the connection
           will be established using that value
        2. username = username for the session
        3. password = password for the session
        4. end_prompt = prompt expected when the command(python script)
           execution is successful, default value: .*(%|#|\$).
        5. remote = 'yes' when executed in remote system & 'no'(default)
           when executed in local system
        6. file_path = path of the python script to be executed
        7. output_dir = directory path used as outputdir for robot scripts
           available in the python script(in execution machine). All the
           Robot tests listed in the Python script should have same
           output directory.
        8. local_output_dir = path of the directory in the local system
           where the robot output files from remote system will be copied.
           If this tag is not available or left empty, results will be
           stored in 'home/<username>/robot_wrapper_opdir' directory.
        Note: Tags 1,2,3 & 8 are only required to copy the results from
        remote to local system when remote(5) argument is set to 'yes'.
    """
    session_id = get_session_id(system_name, session_name)
    session_object = get_object_from_datarepository(session_id)
    credentials = get_credentials(self.datafile, system_name, [
        'ip', 'username', 'password', 'end_prompt', 'remote', 'file_path',
        'output_dir', 'local_output_dir'
    ])
    # file_path and output_dir are mandatory for any execution mode.
    if not credentials['file_path'] or not credentials['output_dir']:
        pNote(
            "Please provide values for 'file_path & output_dir' "
            "tags in input data_file", 'warning')
        return False
    if credentials['end_prompt']:
        prompt = credentials['end_prompt']
    else:
        # Default shell prompt pattern (%, # or $).
        prompt = ".*(%|#|\$)"
    # Relative paths in the datafile are resolved against its directory.
    data_directory = os.path.dirname(self.datafile)
    abs_filepath = getAbsPath(credentials['file_path'], data_directory)
    abs_output_dir = getAbsPath(credentials['output_dir'], data_directory)
    # Timestamp taken before execution so only files modified by this run
    # are picked up below.
    current_time = time.time()
    if os.path.isfile(abs_filepath):
        command = "python " + abs_filepath
        # Run the wrapper script through the established session and wait
        # for the expected prompt.
        status = session_object.send_command(".*", prompt, command)[0]
        if status is True:
            pNote("Robot_wrapper script: '{}' execution is successful".
                  format(abs_filepath))
        else:
            pNote(
                "Robot_wrapper script: '{}' execution failed".format(
                    abs_filepath), 'warning')
    else:
        pNote(
            "Robot_wrapper script: '{}' does not exist".format(
                abs_filepath), 'warning')
        status = False
    # When executed in remote machine
    if credentials['remote'] and credentials['remote'].upper() == "YES":
        if credentials['local_output_dir']:
            local_output_dir = getAbsPath(credentials['local_output_dir'],
                                          data_directory)
        else:
            local_output_dir = "~/robot_wrapper_opdir"
        # Copy the remote output directory to the local machine, then
        # point abs_output_dir at the local copy for result parsing.
        get_file_from_remote_server(credentials['ip'],
                                    credentials['username'],
                                    credentials['password'],
                                    abs_output_dir, local_output_dir)
        abs_output_dir = local_output_dir + os.sep + os.path.basename(
            abs_output_dir)
    # Get the modified xml files in the output_dir
    modified_list = get_modified_files(abs_output_dir, current_time, ".xml")
    # Get the robot xml files from the modified list of files
    robot_xml_list = robot_wrapper_utils.get_robot_xml_files(modified_list)
    # Get results from robot xml files
    robot_test_results = robot_wrapper_utils.get_results_from_robot_xml(
        robot_xml_list)
    # Create junit for robot tests
    robot_wrapper_utils.create_case_junit(robot_test_results)
    return status
def set_env_var(self, var_key=None, var_value=None, filepath=None,
                jsonkey="environmental_variables"):
    """Create a temporary environment variable whose value only lasts
    for this run.

    :Argument:
        var_key = key of the environment variable
        var_value = value of the environment variable
        filepath = Json file where Environmental variables are defined
        jsonkey = The key where all the ENV variable & values are defined
            With jsonkey arg, Users can call same file to set various
            ENV Variable
    Variable File :
        Sample environmental_variable file is available under
        Warriorspace/Config_file/Samples/Set_ENV_Variable_Sample.json

    :Returns:
        status(bool) - True if at least one variable was set successfully
    """
    status = False
    # Complain when neither a key/value pair nor a file was supplied.
    if not any([var_key, var_value, filepath]):
        print_error(
            'Either Provide values to arguments \"var_key\" & \"var_value\" or to argument \"filepath\"'
        )
    # Direct key/value pair takes effect first.
    if var_key is not None and var_value is not None:
        os.environ[var_key] = var_value
        if os.environ[var_key] == var_value:
            print_info('Set ENV variable {0} with value '
                       '{1}'.format(var_key, var_value))
            status = True
    # Then, optionally, load a whole dictionary of variables from a file.
    if filepath is None:
        return status
    testcasefile_path = get_object_from_datarepository('wt_testcase_filepath')
    try:
        filepath = getAbsPath(filepath, os.path.dirname(testcasefile_path))
        with open(filepath, "r") as json_handle:
            env_json = json.load(json_handle)
            if jsonkey not in env_json:
                print_error('The {0} file is missing the key '
                            '\"environmental_variables\", please refer to '
                            'the Samples in Config_files'.format(filepath))
                status = False
            else:
                for env_key, env_value in env_json[jsonkey].items():
                    os.environ[env_key] = env_value
                    if os.environ[env_key] == env_value:
                        print_info('Set ENV variable {0} with value '
                                   '{1}'.format(env_key, env_value))
                        status = True
    except ValueError:
        print_error('The file {0} is not a valid json '
                    'file'.format(filepath))
        status = False
    except IOError:
        print_error('The file {0} does not exist'.format(filepath))
        status = False
    except Exception as error:
        print_error('Encountered {0} error'.format(error))
        status = False
    return status
def get_step_list(filepath, step_tag, sub_step_tag, loop_tag="Loop"):
    """
    Takes the location of Testcase/Suite/Project file as input
    Returns a list of all the step/testcase/testsuite elements
    present in the file.

    :Arguments:
        1. filepath = full path of the Testcase/suite/project xml file
        2. step_tag = xml tag for group of step in the file
        3. sub_step_tag = xml tag for each step in the file
        4. loop_tag = xml tag for loop. Loop by default

    :Returns:
        list of step elements expanded for loops/runmode/retry,
        or False on an invalid Loop definition or loop json file.
    """
    step_list_with_rmt_retry = []
    root = Utils.xml_Utils.getRoot(filepath)
    steps = root.find(step_tag)
    if steps is None:
        print_warning("The file: '{0}' has no {1} to be executed".format(
            filepath, step_tag))
    step_list = []
    for child_node in steps:
        if child_node.tag == sub_step_tag:
            # Plain step: taken as-is.
            step_list.append(child_node)
        elif child_node.tag == loop_tag:
            # Loop block: its steps are replicated once per entry in the
            # json file referenced by the Loop tag's `file` attribute.
            loop_count = child_node.get("id")
            if loop_count is None:
                print_error('`id` attribute is mandatory in Loop tag.'
                            ' example : <Loop id="1" file="filename">')
                return False
            json_file = child_node.get("file")
            if json_file is None:
                print_error('`file` attribute is mandatory in Loop tag.'
                            ' example : <Loop id="1" file="filename">')
                return False
            loop_count = loop_count.strip()
            json_file = json_file.strip()
            # Resolve any ENV-variable placeholders in the file name.
            json_file = Utils.data_Utils.sub_from_env_var(json_file)
            print_info("file is {}".format(json_file))
            loop_steps = child_node.findall(sub_step_tag)
            testcasefile_path = get_object_from_datarepository(
                'wt_testcase_filepath')
            valid_json = True
            try:
                # NOTE(review): `filepath` is rebound here to the loop json
                # path — later code in this function keeps using the new
                # value; confirm that is intended.
                filepath = getAbsPath(json_file,
                                      os.path.dirname(testcasefile_path))
                with open(filepath, "r") as json_handle:
                    json_doc = json.load(json_handle)
                    loop_json = {"loop_json": json_doc}
                    update_datarepository(loop_json)
                    # The loop json must be a list of dicts:
                    # [{"arg1":"value"}, {"arg2":"value"}]
                    if not isinstance(json_doc, list):
                        valid_json = False
                        print_error(
                            'invalid json format specified,'
                            'valid format : [{"arg1":"value"}, {"arg2":"value"}]'
                        )
                    else:
                        for blob in json_doc:
                            if not isinstance(blob, dict):
                                valid_json = False
                                print_error("element is {}. should be dict".format(
                                    type(blob)))
                                print_error(
                                    'invalid json format specified,'
                                    'blob should be dict, valid format : '
                                    '[{"arg1":"value"}, {"arg2":"value"}]')
            except ValueError:
                valid_json = False
                print_error('The file {0} is not a valid json '
                            'file'.format(filepath))
            except IOError:
                valid_json = False
                print_error('The file {0} does not exist'.format(filepath))
            except Exception as error:
                valid_json = False
                print_error('Encountered {0} error'.format(error))
            if not valid_json:
                return False
            # Replicate the loop's steps once per json iteration, tagging
            # each copy with its loop/step/iteration identity.
            for iter_number, _ in enumerate(json_doc):
                for step_number, loop_step in enumerate(loop_steps):
                    copy_step = copy.deepcopy(loop_step)
                    copy_step.set("loop_id", "Loop:{}-Step:{}-Iter:{}".\
                        format(loop_count, step_number+1, iter_number+1))
                    # NOTE(review): attribute value set to an int, not str —
                    # works for in-memory ElementTree lookups but would fail
                    # on serialization; confirm intended.
                    copy_step.set("loop_iter_number", iter_number)
                    arguments = copy_step.find('Arguments')
                    if arguments is not None and arguments is not False:
                        # Substitute per-iteration values into each
                        # argument from the loop json.
                        for argument in arguments.findall('argument'):
                            arg_value = argument.get('value')
                            arg_value = Utils.data_Utils.sub_from_loop_json(
                                arg_value, iter_number)
                            argument.set("value", arg_value)
                    step_list.append(copy_step)
    if root.tag == 'Project' or root.tag == 'TestSuite':
        # Project/TestSuite files reference child files by path; rebuild
        # the list, expanding any wildcard paths.
        step_list = []
        orig_step_list = steps.findall(sub_step_tag)
        for orig_step in orig_step_list:
            orig_step_path = orig_step.find('path').text
            if '*' not in orig_step_path:
                step_list.append(orig_step)
            # When the file path has asterisk(*), get the Warrior XML testcase/testsuite
            # files matching the given pattern
            else:
                orig_step_abspath = Utils.file_Utils.getAbsPath(
                    orig_step_path, os.path.dirname(filepath))
                print_info("Provided {0} path: '{1}' has asterisk(*) in "
                           "it. All the Warrior XML files matching "
                           "the given pattern will be executed.".format(
                               sub_step_tag, orig_step_abspath))
                # Get all the files matching the pattern and sort them by name
                all_files = sorted(glob.glob(orig_step_abspath))
                # Get XML files
                xml_files = [fl for fl in all_files if fl.endswith('.xml')]
                step_files = []
                # Get Warrior testcase/testsuite XML files
                for xml_file in xml_files:
                    root = Utils.xml_Utils.getRoot(xml_file)
                    if root.tag.upper() == sub_step_tag.upper():
                        step_files.append(xml_file)
                # Copy the XML object and set the filepath as path value for
                # all the files matching the pattern
                if step_files:
                    for step_file in step_files:
                        new_step = copy.deepcopy(orig_step)
                        new_step.find('path').text = step_file
                        step_list.append(new_step)
                        print_info("{0}: '{1}' added to the execution "
                                   "list ".format(sub_step_tag, step_file))
                else:
                    print_warning(
                        "Asterisk(*) pattern match failed for '{}' due "
                        "to at least one of the following reasons:\n"
                        "1. No files matched the given pattern\n"
                        "2. Invalid testcase path is given\n"
                        "3. No testcase XMLs are available\n"
                        "Given path will be used for the Warrior "
                        "execution.".format(orig_step_abspath))
                    step_list.append(orig_step)
    # iterate all steps to get the runmode and retry details
    for _, step in enumerate(step_list):
        runmode, value, _ = get_runmode_from_xmlfile(step)
        retry_type, _, _, retry_value, _ = get_retry_from_xmlfile(step)
        if runmode is not None and value > 0:
            # Expand the step `value` times; go_next points past the block.
            go_next = len(step_list_with_rmt_retry) + value + 1
            step_list_with_rmt_retry = append_step_list(
                step_list_with_rmt_retry,
                step,
                value,
                go_next,
                mode="runmode",
                tag="value")
        if retry_type is not None and retry_value > 0:
            go_next = len(step_list_with_rmt_retry) + retry_value + 1
            if runmode is not None:
                # Runmode expansion already happened; drop the runmode tag
                # before expanding for retry so it is not applied twice.
                get_runmode = step.find('runmode')
                step.remove(get_runmode)
            step_list_with_rmt_retry = append_step_list(
                step_list_with_rmt_retry,
                step,
                retry_value,
                go_next,
                mode="retry",
                tag="count")
        if retry_type is None and runmode is None:
            step_list_with_rmt_retry.append(step)
    return step_list_with_rmt_retry
def dict_in_tuple(element):
    """
    This function evaluates the value of the files tag
    and returned validated data.
    input: string
        (path/to/file5.txt;content_type;(header1=value;header2=value2)),
        (path/to/file1.txt;content_type;(header3=value3;header4=value4))
    output: dict
        {
        file5.txt: (file5.txt, open file5.txt (in binary mode),
                    content_type, { header1: value, header2: value2 }),
        file1.txt: (file1.txt, open file5.txt (in binary mode),
                    content_type, { header3: value3, header4: value4 })
        }
    """
    final_dict = {}
    temp_list = []
    # Peel off the surrounding parentheses of one "(path;type;(hdrs))"
    # group. NOTE: strip("))") is equivalent to strip(")") — strip takes a
    # character set, not a literal suffix.
    element = element.strip("))")
    element = element.strip("(")
    if element is not None and element is not False and element != "":
        # element[0] -> "path;content_type", element[1] -> header string.
        element = element.split(";(")
        if element[0] is None or element[0] is False:
            pNote("File path cannot be {0}!".format(element), "error")
        elif element[0] == "":
            pNote("File path cannot be empty!", "error")
        else:
            element[0] = element[0].split(";")
            for j in range(0, len(element[0])):
                element[0][j] = element[0][j].strip()
            if element[0][0] is None or element[0][0] is False:
                pNote("File path cannot be {0}!".format(element), "error")
            elif element[0][0] == "":
                pNote("File path cannot be empty!", "error")
            else:
                # Relative file paths are resolved against sys.path[0]
                # (the directory of the launching script).
                abs_path = file_Utils.getAbsPath(element[0][0], sys.path[0])
                if os.path.exists(abs_path):
                    # temp_list layout: [basename, file handle,
                    #                    content_type?, headers dict?]
                    temp_list.append(
                        os.path.basename(os.path.normpath(element[0][0])))
                    temp_list.append(open(abs_path, 'rb'))
                else:
                    pNote("{0} doesn't exist!".format(element[0]), "error")
                if len(element[0]) > 1:
                    # A content_type was supplied after the path.
                    if element[0][1] is not None and element[0][1] is not False \
                            and element[0][1] != "":
                        temp_list.append(element[0][1])
                    if len(element) > 1:
                        # Headers group present: parse "k=v;k=v" into a dict.
                        if element[1] is not None and element[1] is not False \
                                and element[1] != "":
                            temp_list.append(convert_string_to_dict(
                                element[1]))
                else:
                    if len(element) > 1:
                        # Headers without a content_type: keep tuple shape
                        # by inserting an empty content_type placeholder.
                        if element[1] is not None and element[1] is not False \
                                and element[1] != "":
                            temp_list.append("")
                            temp_list.append(convert_string_to_dict(element[1]))
        # Assemble the tuple keyed by the file's basename; tuple arity
        # depends on how many optional parts were supplied.
        if temp_list != []:
            if len(temp_list) < 3:
                final_dict[temp_list[0]] = (temp_list[0], temp_list[1])
            elif len(temp_list) > 3:
                final_dict[temp_list[0]] = (temp_list[0], temp_list[1],
                                            temp_list[2], temp_list[3])
            elif len(temp_list) > 2:
                final_dict[temp_list[0]] = (temp_list[0], temp_list[1],
                                            temp_list[2])
    else:
        if element == "":
            pNote("File path cannot be empty!", "error")
        else:
            pNote("File path cannot be {0}!".format(element), "error")
    return final_dict
def dict_with_file_paths(element):
    """
    This function evaluates the value of the files tag
    and returned validated data.
    input: string
        file_group_name=path/to/file2.txt; path/to/file3.txt
    output: dict
        {
        file_group_name_1: open file2.txt (in binary mode),
        file_group_name_2: open file3.txt (in binary mode),
        }
    """
    final_dict = {}
    temp_list = []
    if element is not None and element is not False and element != "":
        # element[0] -> group name, element[1] -> ';'-separated paths.
        element = element.split("=")
        for i in range(0, len(element)):
            element[i] = element[i].strip()
        if len(element) < 2:
            pNote("File paths cannot be empty!", "error")
        else:
            if element[0] == "":
                pNote("File group name cannot be empty!", "error")
            elif element[0] is None or element[0] is False:
                pNote("File group name cannot be {0}!".format(element[0]),
                      "error")
            else:
                if element[1] == "":
                    pNote("File paths cannot be empty!", "error")
                elif element[1] is None or element[1] is False:
                    pNote("File paths cannot be {0}!".format(element[0]),
                          "error")
                else:
                    element[1] = element[1].split(";")
                    for i in range(0, len(element[1])):
                        element[1][i] = element[1][i].strip()
                        if element[1][i] is not None and element[1][i] is not \
                                False and element[1][i] != "":
                            # Relative paths are resolved against
                            # sys.path[0] before the existence check.
                            abs_path = file_Utils.getAbsPath(
                                element[1][i], sys.path[0])
                            if os.path.exists(abs_path):
                                # BUG FIX: was open(element[1][i], 'rb') —
                                # opened the original (possibly relative)
                                # path instead of the resolved abs_path
                                # whose existence was just verified
                                # (dict_in_tuple opens abs_path).
                                temp_list.append(open(abs_path, 'rb'))
                            else:
                                pNote(
                                    "{0} doesn't exist!".format(
                                        element[1][i]), "error")
                        else:
                            if element[1][i] == "":
                                pNote("File path cannot be empty!", "error")
                            else:
                                pNote(
                                    "File path cannot be {0}!".format(
                                        element[1][i]), "error")
                    if temp_list != []:
                        # Keys are "<group>_1", "<group>_2", ... in the
                        # order the paths were listed.
                        for i in range(0, len(temp_list)):
                            final_dict[element[0] + "_" +
                                       str(i + 1)] = temp_list[i]
    else:
        if element == "":
            pNote(
                "File group name and corresponding file paths cannot "
                "be empty!", "error")
        else:
            pNote(
                "File group name and corresponding file paths cannot"
                " be {0}!".format(element), "error")
    return final_dict