def _code(parent, string):
    """Render *string* as a run of code <span> tags appended to *parent*.

    Multiline markers are normalised first; a leading ``\\OFF`` marker
    disables syntax highlighting entirely (used e.g. for program output).
    """
    # Replace multiline markers
    string = string.replace('\\\\n', '').replace('\\\\N', '\\\\n')
    # Highlighting explicitly turned off: emit the raw text and stop.
    if string.startswith('\\\\OFF'):
        parent.string = string[5:].strip()
        return
    cursor = 0
    # Walk every syntax match, emitting the plain text between matches
    # followed by a classified span for the match itself.
    for match in re_finditer(C_SYNTAX, string):
        start, stop = match.span()
        new(parent, 'span', string=string[cursor:start])
        # First token group that matched decides the highlight class.
        for token in C_TOKENS:
            if match.group(token):
                new(parent, 'span', class_='code_' + token,
                    string=string[start:stop])
                break
        cursor = stop
    # Trailing text after the last match.
    new(parent, 'span', string=string[cursor:])
def parse_test_steps(contents, test_steps, test_name, dict_test_case, test_dir):
    """Collect test steps from *contents*, expanding ``<func>`` references.

    Parsed step dicts are appended to *test_steps* in place.  A step whose
    description has the form ``<name>`` refers to a function defined in the
    file named by the step's result; such steps are expanded recursively
    instead of appended.  Missing files/functions are recorded in the global
    ``ERROR_LIST`` under TF22/TF23.  Also flips
    ``dict_test_case['contains_stub']`` when a stub step is seen.
    """
    global ERROR_LIST
    step_pattern = (
        r'("|\'){3}\s*(?:Step:|STEP:|\s+\d+\.)\s*(?P<description>.*?)'
        r'(?:(?:Result:|RESULT:)\s*(?P<result>.*?)|)("|\'){3}')
    func_pattern = (r'def\s{}\(.*?\):(?:\s+#\snoqa)?\n'
                    r'(?P<test_case>.*?)(?=def\s|$)')
    # Walk every docstring-style step in order, numbering from 1.
    for step_count, re_step in enumerate(
            re_finditer(step_pattern, contents, flags=DOTALL), start=1):
        step = parse_test_step(re_step, test_name, step_count)
        # Record the first stub step encountered.
        if not dict_test_case['contains_stub'] and \
                step['description'].lower() == "stub test step":
            dict_test_case['contains_stub'] = True
        ref = re_match(r'<(\S+)>', step['description'])
        if ref is None:
            # Ordinary step: keep it as-is.
            test_steps.append(step)
            continue
        # Step references a helper function in another file.
        func_name = ref.group(1)
        ref_path = get_referenced_file_path(test_dir, step['result'])
        if os.path.exists(ref_path) and os.path.isfile(ref_path):
            with open(ref_path, 'r') as handle:
                ref_contents = handle.read()
            found = re_findall(func_pattern.format(func_name),
                               ref_contents, flags=DOTALL)
            if found:
                sub_step_name = "{} {} {}".format(test_name, step_count,
                                                  func_name)
                # Recurse into the referenced function body.
                parse_test_steps(found[0], test_steps, sub_step_name,
                                 dict_test_case, test_dir)
            else:
                ERROR_LIST["TF23"] = ("Referenced function {} does not "
                                      "exist!".format(func_name))
        else:
            ERROR_LIST["TF22"] = ("Referenced file {} does not "
                                  "exist!".format(ref_path))
def _search(collected, content_pattern, newline_pattern, string, filepath, marks):
    """Scan *string* (contents of *filepath*) for tagged comments and store
    them in *collected*.

    NOTE(review): ``collected`` is not a plain dict — ``setdefault`` is called
    with three arguments and ``del collected[filepath]`` is described as
    "if exists", so it must be a project-specific (likely two-level) mapping
    type; confirm its semantics against its definition.
    """
    # Collect all locations of new lines in string
    line = 0
    newlines = [match.start() for match in re_finditer(newline_pattern, string)]
    # Remove all data of file if exists
    del collected[filepath]
    # For each match in
    for match in re_finditer(content_pattern, string):
        # Character-index where the match starts
        start = match.start()
        # Get matching groups: line-comment leader, block-comment closer,
        # and the comment's text content.
        ml, mb, mc = match.group('line', 'block', 'content')
        content = []
        space = len(match.group('space'))
        msc = match.start('content')
        # If match line-comment
        if ml:
            _format(storage = content,
                    string = mc,
                    # Strip the repeated leader (e.g. "#") from each line.
                    pattern = re_compile(r'\n([^\S\n]*{}[^\S\n]*)'.format(ml)),
                    padding = space + (msc - match.start('line')))
        # If match block-comment
        else:
            _format(storage = content,
                    # Drop the block terminator from the content.
                    string = mc[:-len(mb)],
                    pattern = _BREAK,
                    padding = space + (msc - match.start('block')))
        # Format uppercase tag: explicit word wins over a shorthand mark.
        word, mark = match.group('word', 'mark')
        tag = (word or marks[mark]).upper()
        # Get line-number: first newline past the match start.  Resumes from
        # the previous hit, so matches must be in ascending order.
        # NOTE(review): raises StopIteration if a match starts after the last
        # newline (file not newline-terminated) — confirm inputs guarantee it.
        line = next(l for l, c in enumerate(newlines[line:], start=line + 1)
                    if c > start)
        # Store data
        collected.setdefault(filepath, tag, []).append((line, content))
    print('CCOM: processed {!r}'.format(filepath))
def handle_privmsg(self, source: Optional[str], args: List[str]) -> None:
    """React to a PRIVMSG: for each configured pattern whose sender matches,
    reply with a substitution of every matched word in the message."""
    sender = cast(str, source)
    parsed = core.irc_packet.IrcUserSource.from_source_string(sender)
    # If the message was addressed to us, reply to the sender's nick;
    # otherwise reply to the original target (e.g. a channel).
    reply_to = parsed.nick if args[0] == self.bot.nick else args[0]
    text = args[1]
    for rule in self.config["patterns"]:
        if not re_match(rule["sender_pattern"], sender):
            continue
        for hit in re_finditer(rule["word_pattern"], text):
            lo, hi = hit.span()
            # NOTE(review): looks like a leftover debug print — consider
            # removing or routing through a logger.
            print(text[lo:hi])
            self.bot.send_message(
                reply_to,
                re_sub(rule["word_pattern"], rule["response"],
                       text[lo:hi]))
def _str(parent, string):
    """Parse markup in *string*, appending rendered <span> tags to *parent*.

    Each syntax match is handed to its token's builder function; the match's
    inner text (``<token>_txt`` group) is parsed recursively into the tag the
    builder returns.
    """
    cursor = 0
    for match in re_finditer(MD_SYNTAX, string):
        # Find which token expression produced this match.
        for token, builder in MD_TOKENS.items():
            if not match.group(token):
                continue
            start, stop = match.span(token)
            # Plain text between the previous match and this one.
            new(parent, 'span', string=string[cursor:start])
            # Build the entity for this expression.
            tag = builder(match, parent)
            # Recurse into the expression's inner text.
            if tag:
                _str(tag, match.group(token + '_txt'))
            cursor = stop
            break
    # Trailing text after the last match.
    new(parent, 'span', string=string[cursor:])
def rewrite_bdes_ident_to_bsls(file: str) -> None:
    """Replace all occurrences of "bdes_ident" with "bsls_ident" in the
    specified `file`.

    Both spellings are rewritten in place through a memory map: lower-case
    ``bdes_ident`` becomes ``bsls_ident`` and upper-case ``BDES_IDENT``
    becomes ``BSLS_IDENT``.  Replacements are the same length as the
    originals, so the file size never changes.

    Args:
        file (str): an absolute or relative path to a file

    Returns:
        None
    """
    with open(file, "r+b") as f:
        # mmap(fileno, 0) raises ValueError for zero-length files; an empty
        # file has nothing to rewrite anyway, so bail out early.
        f.seek(0, 2)
        if f.tell() == 0:
            return
        with mm_mmap(f.fileno(), 0) as filemap:
            regex = b'(?P<LOWER_CASE>bdes_ident)|(?P<UPPER_CASE>BDES_IDENT)'
            compiled_regex = re_compile(regex)
            # mmap objects satisfy the bytearray interface
            filemap_bytearray = ty_cast(bytearray, filemap)
            for match in re_finditer(compiled_regex, filemap_bytearray):
                group = match.lastgroup
                if group == 'LOWER_CASE':
                    filemap[match.start():match.end()] = b'bsls_ident'
                else:
                    assert group == 'UPPER_CASE'
                    filemap[match.start():match.end()] = b'BSLS_IDENT'
            filemap.flush()
def parse_test_file(file_contents, file_dir):
    """
    This method parses through the test file to make sure that the file is
    formatted correctly and contains all of the necessary information for
    generating/updating an MD document or ALM test case.

    :param str file_contents: The file contents to be parsed
    :param str file_dir: the directory where the test file lives
    :return dict: A dictionary containing the parsed information from the
     file in the form of:

    ::

        {
            'Author': {
                'name': 'Name LastName',
                'email': '*****@*****.**'
            },
            'Scripter': None,
            'TestId': 0,
            'Release': '',
            'TestName': 'Mirroring_LAG',
            'TestAttributes': '',
            'SubArea': '',
            'Feature': '',
            'Objective': (
                'Verify mirroring functionality with LAG'
            ),
            'Requirements': (
                ' -2 Ridley switches\n'
                ' -3 traffic gen/analyzer ports\n'
                ' -1 IxNetwork capable host workstation'
            ),
            'TestDescription': (
                'Setup a mirroring session using LAG as the mirror source '
                'and destination and run traffic. Test using unicast, '
                'multicast, and broadcast traffic. Run multiple streams, '
                'varying mac source/dest addresses to load balance on the '
                'lag. Run mirror for egress only, ingress only, and both.'
            ),
            'PlanPriority': '3 - High',
            'TestPassCriteria': 'All steps pass',
            'PlatformIndependent': 'N',
            'SupportedPlatforms': '8408ml;AS5712',
            'Topology': (
                '  +-------+                 +-------+\n'
                '  |       |                 |       |\n'
                '  |  ix1  |                 |  ix2  |\n'
                '  |       |                 |       |\n'
                '  +---+---+                 +---+---+\n'
                '      |                         |        +-------+\n'
                '      |                         |        |  hs1  |\n'
                '  +---+---+    (lag)        +---+---+    +-------+\n'
                '  |       |-------|         |\n'
                '  |  sw1  |       |   sw2   |\n'
                '  |       |-------|         |\n'
                '  +---+---+                 +-------+\n'
                '      |\n'
                '      |\n'
                '  +---+---+\n'
                '  |       |\n'
                '  |  ix3  |\n'
                '  |       |\n'
                '  +-------+'
            ),
            'TOPOLOGY': ...,
            'Steps': [
                {
                    'name': 'mirroring_lag_configure_1',
                    'description': 'config vlan 100 on sw1 and sw2',
                    'result': 'vlans configured'
                },
                ...
                {
                    'name': 'mirroring_lag_rx_broadcast_traffic_1',
                    'description': (
                        'configure a mirror session on switch 1 to '
                        'capture RX traffic'
                    ),
                    'result': (
                        'sw1 if01 source port, lag interface is dest, '
                        'mirror configured'
                    )
                },
            ],
            'test_marks': {},
            'automated': "Yes",
            'test_suited_for_ostl': True,
            'crs_referenced': [],
            'contains_stub': False,
        }
    """
    global ERROR_LIST
    # Make sure error list is empty
    ERROR_LIST = {}
    # One match per test function: optional pytest marks, the function name,
    # and the body up to the next decorator/def or end-of-file.
    test_func_re = (r'(?P<test_marks>(?:@(?:pytest\.)?mark\.[^\n]+\s*)+)?\n'
                    r'def (?P<test_func>test_[^\(]+)\([^\)]*\):\s+'
                    r'(?P<test_case>.+?)(?=\n@|\ndef |$)')
    env_setup_re = (r'def env_setup\(.*?\):(?: +# noqa)?\n'
                    r'(?P<test_case>.*?)(?=def |$)')
    # NOTE(review): only [0] ("In Progress"/"Dev Funnel") and [1]
    # ("Not Feasible") are used below; [2] (catch-all skip) is never read.
    skip_marks_re = [
        r'mark\.skip\((?:reason=|)[\"\'](In Progress|Dev Funnel)[\'\"]\)',
        r'mark\.skip\((?:reason=|)[\"\']Not Feasible[\'\"]\)',
        r'mark\.skip\((?:reason=|)[\"\'].*[\'\"]\)'
    ]
    platform_incompatible_marks_re = (
        r'mark\.platform_incompatible\(\[\'ostl\'\]\)')
    dos_line_endings_re = r'\r\n'
    if re_search(dos_line_endings_re, file_contents):
        ERROR_LIST["TF20"] = (
            "File using DOS line endings, expected UNIX line endings")
        # switch to unix line endings so rest of parser can run
        file_contents = re_sub(dos_line_endings_re, "\n", file_contents)
    dict_test_case = {}
    dict_test_case['contains_stub'] = False
    # Header fields (Author, TestName, Objective, ...) come from the
    # module docstring parser.
    dict_test_case.update(parse_test_header(file_contents))
    # Any of these header fields set to "stub" marks the file as a stub.
    stubbed_keys = ['Objective', 'Requirements', 'TestPassCriteria']
    for key in stubbed_keys:
        if key in dict_test_case and dict_test_case[key].lower() == "stub":
            dict_test_case['contains_stub'] = True
    test_steps = []
    # At most one env_setup() declaration is allowed per file.
    max_num_env_setup = 1
    re_result = re_findall(env_setup_re, file_contents, flags=DOTALL)
    if len(re_result) > max_num_env_setup:
        ERROR_LIST["TF21"] = "There are too many declarations of env_setup"
    if re_result:
        test_case = re_result[0]
        test_name = "env_setup"
        # TF18 accumulates step-level errors; cleared again if none occur.
        ERROR_LIST["TF18"] = ""
        # collect all test steps
        parse_test_steps(test_case, test_steps, test_name, dict_test_case,
                         file_dir)
        if ERROR_LIST["TF18"]:
            ERROR_LIST["TF18"] = ERROR_LIST["TF18"].strip()
        else:
            del ERROR_LIST["TF18"]
    re_result = re_findall(r'def test_', file_contents)
    if not re_result:
        ERROR_LIST["TF15"] = "Unable to find any test_ functions"
        assert_errors()
    num_test_expected = len(re_result)
    re_result = re_findall(r'def test_(?P<test_func>\S+)\(.*?\)',
                           file_contents, flags=DOTALL)
    if not re_result:
        ERROR_LIST["TF16"] = "Unable to find any valid test_ functions"
        assert_errors()
    re_results = re_finditer(test_func_re, file_contents, flags=DOTALL)
    # NOTE(review): finditer returns an iterator, which is always truthy,
    # so this branch can never fire; the TF19 count check below is what
    # actually catches a mismatch.
    if not re_results:
        ERROR_LIST["TF16"] = "Unable to find any valid test_ functions"
        assert_errors()
    num_test_found = 0
    for re_result in re_results:
        num_test_found += 1
        partial = re_result.groupdict()
        # get test step name
        test_name = partial['test_func']
        test_marks = partial['test_marks']
        if "TF18" not in ERROR_LIST.keys():
            ERROR_LIST["TF18"] = ""
        # collect all test steps
        parse_test_steps(partial['test_case'], test_steps, test_name,
                         dict_test_case, file_dir)
        if not test_steps:
            ERROR_LIST["TF17"] = (
                "Test function {} does not contain any valid STEP strings"
            ).format(test_name)
        if ERROR_LIST["TF18"]:
            ERROR_LIST["TF18"] = ERROR_LIST["TF18"].strip()
        else:
            del ERROR_LIST["TF18"]
        if 'test_marks' not in dict_test_case:
            dict_test_case['test_marks'] = {}
        if test_marks:
            # Marks are stored one-per-line in the match; join with ';'.
            dict_test_case['test_marks'][test_name] = \
                ';'.join(test_marks.split('\n'))
        if 'test_funcs' not in dict_test_case:
            dict_test_case['test_funcs'] = []
        dict_test_case['test_funcs'].append(test_name)
        # look at pytest marks for skip markers
        if (test_marks and re_search(skip_marks_re[0], test_marks)):
            # skip marker found for "In Progress"
            if 'automated' not in dict_test_case:
                dict_test_case['automated'] = "Dev Funnel"
        elif (test_marks and re_search(skip_marks_re[1], test_marks)):
            # skip marker found for "Not Feasible"
            if 'automated' not in dict_test_case:
                dict_test_case['automated'] = "Not Feasible"
        else:
            # no skip marker found, at least 1 test step automated
            # NOTE(review): unconditional assignment — a later unmarked
            # test overrides an earlier skip classification; confirm that
            # "any automated test wins" is the intended precedence.
            dict_test_case['automated'] = "Yes"
        # look at pytest marks for platform incompatible markers
        if (test_marks and
                re_search(platform_incompatible_marks_re, test_marks)):
            # platform incompatible marker found, at least 1 test step
            # not compatible with OSTL
            dict_test_case['test_suited_for_ostl'] = 'N'
        else:
            # no marker found
            if 'test_suited_for_ostl' not in dict_test_case:
                dict_test_case['test_suited_for_ostl'] = 'Y'
    if num_test_found != num_test_expected:
        ERROR_LIST["TF19"] = (
            "Found {} valid test_ functions. Expected {}.").format(
            num_test_found, num_test_expected)
    dict_test_case['Steps'] = test_steps
    # Search the file for any references to CRs
    dict_test_case['crs_referenced'] = []
    cr_re = r'CR\s*\d{4,}'
    re_results = re_finditer(cr_re, file_contents, flags=IGNORECASE)
    for re_result in re_results:
        # make form CR####
        cr = re_sub(r'\s+', '', re_result.group(0)).upper()
        if cr not in dict_test_case['crs_referenced']:
            dict_test_case['crs_referenced'].append(cr)
    # Raises if any errors were collected along the way.
    assert_errors()
    return dict_test_case
def modify_message(self, tab, msg_as_string):
    """Apply a tab's parameter-handling rules to *msg_as_string* and return
    the modified message (or the original on any error / no match).

    The target expression selects matches; selected match indices are
    replaced either with a static expression or with values dynamically
    extracted (from a cached response, a freshly issued request, or the
    final macro response).

    NOTE(review): Python 2 / Jython code (``dict.viewkeys()``).
    """
    # User-supplied comma-separated match indices / slices (e.g. "0,2:5,-1").
    ph_matchnum_txt = tab.param_handl_txtfield_match_indices.getText()
    ph_target_exp = tab.get_exp_pane_expression(tab.param_handl_exp_pane_target)
    ph_extract_static_exp = tab.get_exp_pane_expression(tab.param_handl_exp_pane_extract_static)
    ph_extract_single_exp = tab.get_exp_pane_expression(tab.param_handl_exp_pane_extract_single)
    ph_extract_macro_exp = tab.get_exp_pane_expression(tab.param_handl_exp_pane_extract_macro)
    ph_extract_cached_exp = tab.get_exp_pane_expression(tab.param_handl_exp_pane_extract_cached)
    if not ph_target_exp:
        self.logger.warning(
            'No match expression specified! Skipping tab "{}".'.format(
                tab.namepane_txtfield.getText()
            )
        )
        return msg_as_string
    # Pre-baked error template; the two remaining {} slots are filled with
    # the offending expression and the regex error.
    exc_invalid_regex = 'Skipping tab "{}" due to error in expression {{}}: {{}}'.format(
        tab.namepane_txtfield.getText()
    )
    try:
        match_exp = re_compile(ph_target_exp)
    except re_error as e:
        self.logger.error(exc_invalid_regex.format(ph_target_exp, e))
        return msg_as_string
    # The following code does not remove support for groups,
    # as the original expression will be used for actual replacements.
    # We simply need an expression without capturing groups to feed into re.findall(),
    # which enables the logic for granular control over which match indices to target.
    # Removing named groups to normalize capturing groups.
    findall_exp = re_sub('\?P<.+?>', '', ph_target_exp)
    # Removing capturing groups to search for full matches only.
    findall_exp = re_sub(r'(?<!\\)\(([^?]*?)(?<!\\)\)', '\g<1>', findall_exp)
    findall_exp = re_compile(findall_exp)
    self.logger.debug('findall_exp: {}'.format(findall_exp.pattern))
    all_matches = re_findall(findall_exp, msg_as_string)
    self.logger.debug('all_matches: {}'.format(all_matches))
    match_count = len(all_matches)
    if not match_count:
        self.logger.warning(
            'Skipping tab "{}" because this expression found no matches: {}'.format(
                tab.namepane_txtfield.getText(), ph_target_exp
            )
        )
        return msg_as_string
    matches = list()
    dyn_values = ''
    # Default replacement is the static expression; the dynamic branch
    # below may rebuild match_exp and populate dyn_values instead.
    replace_exp = ph_extract_static_exp
    if tab.param_handl_dynamic_chkbox.isSelected():
        find_exp, target_txt = '', ''
        # Choose the extraction source: cached response, a single fresh
        # request, or the final macro response.
        selected_item = tab.param_handl_combo_extract.getSelectedItem()
        if selected_item == tab.PARAM_HANDL_COMBO_EXTRACT_CACHED:
            find_exp, target_txt = ph_extract_cached_exp, tab.param_handl_cached_resp_viewer.getMessage()
            target_txt = self.helpers.bytesToString(target_txt)
        elif selected_item == tab.PARAM_HANDL_COMBO_EXTRACT_SINGLE:
            self.issue_request(tab)
            find_exp, target_txt = ph_extract_single_exp, self.helpers.bytesToString(tab.response)
        elif selected_item == tab.PARAM_HANDL_COMBO_EXTRACT_MACRO:
            find_exp, target_txt = ph_extract_macro_exp, self.final_macro_resp
        if not find_exp:
            self.logger.warning(
                'No dynamic value extraction expression specified! Skipping tab "{}".'.format(
                    tab.namepane_txtfield.getText()
                )
            )
            return msg_as_string
        try:
            # Making a list to enable multiple iterations.
            matches = list(re_finditer(find_exp, target_txt))
        except re_error as e:
            # NOTE(review): always reports ph_extract_macro_exp even when
            # the failing expression came from the cached/single source —
            # confirm whether find_exp was intended here.
            self.logger.error(exc_invalid_regex.format(ph_extract_macro_exp, e))
            return msg_as_string
        if not matches:
            self.logger.warning('Skipping tab "{}" because this expression found no matches: {}'.format(
                tab.namepane_txtfield.getText(), find_exp
            ))
            return msg_as_string
        # Merge all named-group captures across matches; duplicate group
        # names across matches are ambiguous and abort the tab.
        groups = {}
        groups_keys = groups.viewkeys()
        for match in matches:
            gd = match.groupdict()
            # The given expression should have unique group matches.
            for k in gd.keys():
                if k in groups_keys:
                    self.logger.warning('Skipping tab "{}" because this expression found ambiguous matches: {}'.format(
                        tab.namepane_txtfield.getText(), find_exp
                    ))
                    return msg_as_string
            groups.update(gd)
        # Remove '$' not preceded by '\'
        exp = re_sub(r'(?<!\\)\$', '', ph_target_exp)
        # Inline flag prefix like (?is); verbose mode needs a newline so the
        # appended group expressions aren't swallowed by a trailing comment.
        flags = re_match('\(\?[Limuxs]{1,6}\)', ph_target_exp)
        if flags is not None and 'x' in flags.group(0):
            exp += '\n'
        # Re-encode each extracted value as a named group that matches it
        # literally, so the final .sub() can reference the values by name.
        groups_exp = ''.join(['(?P<{}>{})'.format(group_name, group_match)
                              for group_name, group_match in groups.items()])
        dyn_values = ''.join(groups.values())
        # No need for another try/except around this re.compile(),
        # as ph_target_exp was already checked when compiling match_exp earlier.
        # match_exp = re_compile(exp + groups_exp + end)
        match_exp = re_compile(exp + groups_exp)
        self.logger.debug('match_exp adjusted to:\n{}'.format(match_exp.pattern))
    # Parse the user's index list into concrete match indices; negative
    # values count from the end, slices expand to ranges.
    subsets = ph_matchnum_txt.replace(' ', '').split(',')
    match_indices = []
    for subset in subsets:
        try:
            if ':' in subset:
                sliceindex = subset.index(':')
                start = int(subset[:sliceindex])
                end = int(subset[sliceindex + 1:])
                if start < 0:
                    start = match_count + start
                if end < 0:
                    end = match_count + end
                for match_index in range(start, end):
                    match_indices.append(match_index)
            else:
                match_index = int(subset)
                if match_index < 0:
                    match_index = match_count + match_index
                match_indices.append(match_index)
        except ValueError as e:
            self.logger.error(
                'Ignoring invalid match index or slice on tab "{}" due to {}'.format(
                    tab.namepane_txtfield.getText(), e
                )
            )
            continue
    # NOTE(review): set(sorted(...)) discards the ordering again; the later
    # list(match_indices) relies on set iteration order — verify ordering
    # assumptions on the target interpreter.
    match_indices = set(sorted([m for m in match_indices if m < match_count]))
    self.logger.debug('match_indices: {}'.format(match_indices))
    # Using findall_exp to avoid including capture groups in the result.
    message_parts = re_split(findall_exp, msg_as_string)
    self.logger.debug('message_parts: {}'.format(message_parts))
    # The above strategy to use re.split() in order to enable the usage of
    # match_indices ends up breaking non-capturing groups.  At this point,
    # however, we can safely remove all non-capturing groups and everything
    # will be peachy.
    ncg_exp = re_compile('\(\?[^P].+?\)')
    if re_search(ncg_exp, match_exp.pattern) is not None:
        match_exp = re_compile(ncg_exp.sub('', match_exp.pattern))
        if flags is not None:
            # NOTE(review): `flags` is only assigned inside the dynamic
            # branch above; on the static path this line would raise
            # NameError — confirm this branch is unreachable when the
            # dynamic checkbox is off.
            match_exp = re_compile(flags.group(0) + match_exp.pattern)
        self.logger.debug('match_exp adjusted to:\n{}'.format(match_exp.pattern))
    # Reassemble the message: selected indices get the substituted value,
    # unselected matches are restored verbatim.
    modified_message = ''
    remaining_indices = list(match_indices)
    for part_index, message_part in enumerate(message_parts):
        if remaining_indices and part_index == remaining_indices[0]:
            try:
                final_value = match_exp.sub(replace_exp, all_matches[part_index] + dyn_values)
            except (re_error, IndexError) as e:
                self.logger.error(exc_invalid_regex.format(match_exp.pattern + ' or expression ' + replace_exp, e))
                return msg_as_string
            self.logger.debug('Found:\n{}\nreplaced using:\n{}\nin string:\n{}'.format(
                match_exp.pattern, replace_exp, all_matches[part_index] + dyn_values
            ))
            final_value = message_part + final_value
            modified_message += final_value
            remaining_indices.pop(0)
        elif part_index < match_count:
            modified_message += message_part + all_matches[part_index]
        else:
            # Trailing part after the last match.
            modified_message += message_part
    return modified_message