def get_function_body(args, filename, clang_args, extra_commands, prefixes,
                      triple_in_cmd, func_dict):
  # TODO Clean up duplication of asm/common build_function_body_dictionary
  # Invoke external tool and extract function bodies.
  raw_tool_output = common.invoke_tool(args.clang, clang_args, filename)
  for extra_command in extra_commands:
    extra_args = shlex.split(extra_command)
    with tempfile.NamedTemporaryFile() as f:
      f.write(raw_tool_output.encode())
      f.flush()
      if extra_args[0] == 'opt':
        if args.opt is None:
          print(filename, 'needs to run opt. '
                'Please specify --llvm-bin or --opt', file=sys.stderr)
          sys.exit(1)
        extra_args[0] = args.opt
      raw_tool_output = common.invoke_tool(extra_args[0], extra_args[1:],
                                           f.name)
  if '-emit-llvm' in clang_args:
    common.build_function_body_dictionary(
        common.OPT_FUNCTION_RE, common.scrub_body, [], raw_tool_output,
        prefixes, func_dict, args.verbose, args.function_signature)
  else:
    print('The clang command line should include -emit-llvm as asm tests '
          'are discouraged in Clang testsuite.', file=sys.stderr)
    sys.exit(1)
def get_function_body(args, filename, clang_args, prefixes, triple_in_cmd,
                      func_dict):
  # TODO Clean up duplication of asm/common build_function_body_dictionary
  # Invoke external tool and extract function bodies.
  raw_tool_output = common.invoke_tool(args.clang, clang_args, filename)
  if '-emit-llvm' in clang_args:
    common.build_function_body_dictionary(
        common.OPT_FUNCTION_RE, common.scrub_body, [], raw_tool_output,
        prefixes, func_dict, args.verbose)
  else:
    print('The clang command line should include -emit-llvm as asm tests '
          'are discouraged in Clang testsuite.', file=sys.stderr)
    sys.exit(1)
def _get_block_infos(run_infos, test_path, args, common_prefix):  # noqa
  """ For each run line, run the tool with the specified args and collect the
      output. We use the concept of 'blocks' for uniquing, where a block is
      a series of lines of text with no more than one newline character between
      each one.  For example:

      This
      is
      one block

      This is
      another block

      This is yet another block

      We then build up a 'block_infos' structure containing a dict where the
      text of each block is the key and a list of the sets of prefixes that may
      generate that particular block.  This then goes through a series of
      transformations to minimise the amount of CHECK lines that need to be
      written by taking advantage of common prefixes.
  """

  def _block_key(tool_args, prefixes):
    """ Get a hashable key based on the current tool_args and prefixes.
    """
    return ' '.join([tool_args] + prefixes)

  all_blocks = {}
  max_block_len = 0

  # A cache of the furthest-back position in any block list of the first
  # instance of each block, indexed by the block itself.
  farthest_indexes = defaultdict(int)

  # Run the tool for each run line to generate all of the blocks.
  for prefixes, tool_args in run_infos:
    key = _block_key(tool_args, prefixes)
    raw_tool_output = common.invoke_tool(args.llvm_mca_binary, tool_args,
                                         test_path)

    # Replace any lines consisting of purely whitespace with empty lines.
    raw_tool_output = '\n'.join(line if line.strip() else ''
                                for line in raw_tool_output.splitlines())

    # Split blocks, stripping all trailing whitespace, but keeping preceding
    # whitespace except for newlines so that columns will line up visually.
    all_blocks[key] = [
        b.lstrip('\n').rstrip() for b in raw_tool_output.split('\n\n')
    ]
    max_block_len = max(max_block_len, len(all_blocks[key]))

  # Attempt to align matching blocks until no more changes can be made.
  made_changes = True
  while made_changes:
    made_changes = _align_matching_blocks(all_blocks, farthest_indexes)

  # If necessary, pad the lists of blocks with empty blocks so that they are
  # all the same length.
  for key in all_blocks:
    len_to_pad = max_block_len - len(all_blocks[key])
    all_blocks[key] += [''] * len_to_pad

  # Create the block_infos structure where it is a nested dict in the form of:
  #   block number -> block text -> list of prefix sets
  block_infos = defaultdict(lambda: defaultdict(list))
  for prefixes, tool_args in run_infos:
    key = _block_key(tool_args, prefixes)
    for block_num, block_text in enumerate(all_blocks[key]):
      block_infos[block_num][block_text].append(set(prefixes))

  # Now go through the block_infos structure and attempt to smartly prune the
  # number of prefixes per block to the minimal set possible to output.
  for block_num in range(len(block_infos)):
    # When there are multiple block texts for a block num, remove any
    # prefixes that are common to more than one of them.
    # E.g. [ [{ALL,FOO}] , [{ALL,BAR}] ] -> [ [{FOO}] , [{BAR}] ]
    all_sets = [s for s in block_infos[block_num].values()]
    pruned_sets = []

    for i, setlist in enumerate(all_sets):
      other_set_values = set([
          elem for j, setlist2 in enumerate(all_sets) for set_ in setlist2
          for elem in set_ if i != j
      ])
      pruned_sets.append([s - other_set_values for s in setlist])

    for i, block_text in enumerate(block_infos[block_num]):
      # When a block text matches multiple sets of prefixes, try removing any
      # prefixes that aren't common to all of them.
      # E.g. [ {ALL,FOO} , {ALL,BAR} ] -> [{ALL}]
      common_values = set.intersection(*pruned_sets[i])
      if common_values:
        pruned_sets[i] = [common_values]

      # Everything should be uniqued as much as possible by now.  Apply the
      # newly pruned sets to the block_infos structure.
      # If there are any blocks of text that still match multiple prefixes,
      # output a warning.
      current_set = set()
      for s in pruned_sets[i]:
        s = sorted(list(s))
        if s:
          current_set.add(s[0])
          if len(s) > 1:
            _warn('Multiple prefixes generating same output: {} '
                  '(discarding {})'.format(','.join(s), ','.join(s[1:])))

      if block_text and not current_set:
        raise Error(
            'block not captured by existing prefixes:\n\n{}'.format(
                block_text))
      block_infos[block_num][block_text] = sorted(list(current_set))

    # If we have multiple block_texts, try to break them down further to avoid
    # the case where we have very similar block_texts repeated after each
    # other.
    if common_prefix and len(block_infos[block_num]) > 1:
      # We'll only attempt this if each of the block_texts have the same number
      # of lines as each other.
      same_num_Lines = (len(
          set(
              len(k.splitlines())
              for k in block_infos[block_num].keys())) == 1)
      if same_num_Lines:
        breakdown = _break_down_block(block_infos[block_num], common_prefix)
        if breakdown:
          block_infos[block_num] = breakdown

  return block_infos
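# --- Illustrative aside (not part of the original script) --------------------
# The prefix-pruning step above is easiest to see on a toy input.  This is a
# minimal, self-contained sketch using hypothetical block text and prefix
# names; it only mirrors the "drop prefixes shared across differing block
# texts" loop, not the full _get_block_infos() pipeline.
def _example_prefix_pruning():
  from collections import defaultdict

  # block number -> block text -> list of prefix sets, as described in the
  # docstring above.  Two RUN lines (ALL+FOO and ALL+BAR) produced different
  # text for block 0, so the shared ALL prefix cannot be used to CHECK either
  # variant and gets pruned away.
  block_infos = defaultdict(lambda: defaultdict(list))
  block_infos[0]['add w8, w8, w8'].append({'ALL', 'FOO'})
  block_infos[0]['add x8, x8, x8'].append({'ALL', 'BAR'})

  all_sets = list(block_infos[0].values())
  pruned_sets = []
  for i, setlist in enumerate(all_sets):
    other_set_values = set(elem for j, setlist2 in enumerate(all_sets)
                           for set_ in setlist2 for elem in set_ if i != j)
    pruned_sets.append([s - other_set_values for s in setlist])
  return pruned_sets  # [[{'FOO'}], [{'BAR'}]]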
def main(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( '--llc-binary', default='llc', help='The "llc" binary to use to generate the test case') parser.add_argument('--function', help='The function in the test file to update') parser.add_argument( '--extra_scrub', action='store_true', help= 'Always use additional regex to further reduce diffs between various subtargets' ) parser.add_argument( '--x86_scrub_rip', action='store_true', default=True, help= 'Use more regex for x86 matching to reduce diffs between various subtargets' ) parser.add_argument('--no_x86_scrub_rip', action='store_false', dest='x86_scrub_rip') parser.add_argument('tests', nargs='+') args = common.parse_commandline_args(parser) script_name = os.path.basename(__file__) test_paths = [ test for pattern in args.tests for test in glob.glob(pattern) ] for test in test_paths: with open(test) as f: input_lines = [l.rstrip() for l in f] first_line = input_lines[0] if input_lines else "" if 'autogenerated' in first_line and script_name not in first_line: common.warn( "Skipping test which wasn't autogenerated by " + script_name, test) continue if args.update_only: if not first_line or 'autogenerated' not in first_line: common.warn("Skipping test which isn't autogenerated: " + test) continue triple_in_ir = None for l in input_lines: m = common.TRIPLE_IR_RE.match(l) if m: triple_in_ir = m.groups()[0] break run_lines = common.find_run_lines(test, input_lines) run_list = [] for l in run_lines: if '|' not in l: common.warn('Skipping unparseable RUN line: ' + l) continue commands = [cmd.strip() for cmd in l.split('|', 1)] llc_cmd = commands[0] llc_tool = llc_cmd.split(' ')[0] triple_in_cmd = None m = common.TRIPLE_ARG_RE.search(llc_cmd) if m: triple_in_cmd = m.groups()[0] march_in_cmd = None m = common.MARCH_ARG_RE.search(llc_cmd) if m: march_in_cmd = m.groups()[0] filecheck_cmd = '' if len(commands) > 1: filecheck_cmd = commands[1] common.verify_filecheck_prefixes(filecheck_cmd) if llc_tool != 'llc': common.warn('Skipping non-llc RUN line: ' + l) continue if not filecheck_cmd.startswith('FileCheck '): common.warn('Skipping non-FileChecked RUN line: ' + l) continue llc_cmd_args = llc_cmd[len(llc_tool):].strip() llc_cmd_args = llc_cmd_args.replace('< %s', '').replace('%s', '').strip() if test.endswith('.mir'): llc_cmd_args += ' -x mir' check_prefixes = [ item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd) for item in m.group(1).split(',') ] if not check_prefixes: check_prefixes = ['CHECK'] # FIXME: We should use multiple check prefixes to common check lines. For # now, we just ignore all but the last. 
run_list.append( (check_prefixes, llc_cmd_args, triple_in_cmd, march_in_cmd)) if test.endswith('.mir'): comment_sym = '#' check_indent = ' ' else: comment_sym = ';' check_indent = '' autogenerated_note = (comment_sym + ADVERT + 'utils/' + script_name) func_dict = {} for p in run_list: prefixes = p[0] for prefix in prefixes: func_dict.update({prefix: dict()}) for prefixes, llc_args, triple_in_cmd, march_in_cmd in run_list: common.debug('Extracted LLC cmd:', llc_tool, llc_args) common.debug('Extracted FileCheck prefixes:', str(prefixes)) raw_tool_output = common.invoke_tool(args.llc_binary, llc_args, test) triple = triple_in_cmd or triple_in_ir if not triple: triple = asm.get_triple_from_march(march_in_cmd) asm.build_function_body_dictionary_for_triple( args, raw_tool_output, triple, prefixes, func_dict) is_in_function = False is_in_function_start = False func_name = None prefix_set = set([prefix for p in run_list for prefix in p[0]]) common.debug('Rewriting FileCheck prefixes:', str(prefix_set)) output_lines = [] output_lines.append(autogenerated_note) for input_line in input_lines: if is_in_function_start: if input_line == '': continue if input_line.lstrip().startswith(';'): m = common.CHECK_RE.match(input_line) if not m or m.group(1) not in prefix_set: output_lines.append(input_line) continue # Print out the various check lines here. asm.add_asm_checks(output_lines, check_indent + ';', run_list, func_dict, func_name) is_in_function_start = False if is_in_function: if common.should_add_line_to_output(input_line, prefix_set): # This input line of the function body will go as-is into the output. output_lines.append(input_line) else: continue if input_line.strip() == '}': is_in_function = False continue # Discard any previous script advertising. if input_line.startswith(comment_sym + ADVERT): continue # If it's outside a function, it just gets copied to the output. output_lines.append(input_line) m = common.IR_FUNCTION_RE.match(input_line) if not m: continue func_name = m.group(1) if args.function is not None and func_name != args.function: # When filtering on a specific function, skip all others. continue is_in_function = is_in_function_start = True common.debug('Writing %d lines to %s...' % (len(output_lines), test)) with open(test, 'wb') as f: f.writelines( ['{}\n'.format(l).encode('utf-8') for l in output_lines])
def main():
  from argparse import RawTextHelpFormatter
  parser = argparse.ArgumentParser(description=__doc__,
                                   formatter_class=RawTextHelpFormatter)
  parser.add_argument('-v', '--verbose', action='store_true',
                      help='Show verbose output')
  parser.add_argument('--opt-binary', default='opt',
                      help='The opt binary used to generate the test case')
  parser.add_argument('--function',
                      help='The function in the test file to update')
  parser.add_argument('-u', '--update-only', action='store_true',
                      help='Only update test if it was already autogened')
  parser.add_argument('tests', nargs='+')
  args = parser.parse_args()

  script_name = os.path.basename(__file__)
  autogenerated_note = (ADVERT + 'utils/' + script_name)

  opt_basename = os.path.basename(args.opt_binary)
  if (opt_basename != "opt"):
    common.error('Unexpected opt name: ' + opt_basename)
    sys.exit(1)

  test_paths = [test for pattern in args.tests for test in glob.glob(pattern)]
  for test in test_paths:
    if args.verbose:
      print('Scanning for RUN lines in test file: %s' % (test, ),
            file=sys.stderr)
    with open(test) as f:
      input_lines = [l.rstrip() for l in f]

    first_line = input_lines[0] if input_lines else ""
    if 'autogenerated' in first_line and script_name not in first_line:
      common.warn("Skipping test which wasn't autogenerated by " +
                  script_name + ": " + test)
      continue

    if args.update_only:
      if not first_line or 'autogenerated' not in first_line:
        common.warn("Skipping test which isn't autogenerated: " + test)
        continue

    raw_lines = [
        m.group(1)
        for m in [common.RUN_LINE_RE.match(l) for l in input_lines] if m
    ]
    run_lines = [raw_lines[0]] if len(raw_lines) > 0 else []
    for l in raw_lines[1:]:
      if run_lines[-1].endswith("\\"):
        run_lines[-1] = run_lines[-1].rstrip("\\") + " " + l
      else:
        run_lines.append(l)

    if args.verbose:
      print('Found %d RUN lines:' % (len(run_lines), ), file=sys.stderr)
      for l in run_lines:
        print('  RUN: ' + l, file=sys.stderr)

    prefix_list = []
    for l in run_lines:
      if '|' not in l:
        common.warn('Skipping unparseable RUN line: ' + l)
        continue

      (tool_cmd, filecheck_cmd) = tuple(
          [cmd.strip() for cmd in l.split('|', 1)])
      common.verify_filecheck_prefixes(filecheck_cmd)

      if not tool_cmd.startswith(opt_basename + ' '):
        common.warn('Skipping non-%s RUN line: %s' % (opt_basename, l))
        continue

      if not filecheck_cmd.startswith('FileCheck '):
        common.warn('Skipping non-FileChecked RUN line: ' + l)
        continue

      tool_cmd_args = tool_cmd[len(opt_basename):].strip()
      tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s',
                                                                '').strip()

      check_prefixes = [
          item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
          for item in m.group(1).split(',')
      ]
      if not check_prefixes:
        check_prefixes = ['CHECK']
      # FIXME: We should use multiple check prefixes to common check lines. For
      # now, we just ignore all but the last.
      prefix_list.append((check_prefixes, tool_cmd_args))

    func_dict = {}
    for prefixes, _ in prefix_list:
      for prefix in prefixes:
        func_dict.update({prefix: dict()})
    for prefixes, opt_args in prefix_list:
      if args.verbose:
        print('Extracted opt cmd: ' + opt_basename + ' ' + opt_args,
              file=sys.stderr)
        print('Extracted FileCheck prefixes: ' + str(prefixes),
              file=sys.stderr)

      raw_tool_outputs = common.invoke_tool(args.opt_binary, opt_args, test)

      # Split analysis outputs by "Printing analysis " declarations.
      for raw_tool_output in re.split(r'Printing analysis ',
                                      raw_tool_outputs):
        common.build_function_body_dictionary(
            common.ANALYZE_FUNCTION_RE, common.scrub_body, [],
            raw_tool_output, prefixes, func_dict, args.verbose, False)

    is_in_function = False
    is_in_function_start = False
    prefix_set = set(
        [prefix for prefixes, _ in prefix_list for prefix in prefixes])
    if args.verbose:
      print('Rewriting FileCheck prefixes: %s' % (prefix_set, ),
            file=sys.stderr)
    output_lines = []
    output_lines.append(autogenerated_note)

    for input_line in input_lines:
      if is_in_function_start:
        if input_line == '':
          continue
        if input_line.lstrip().startswith(';'):
          m = common.CHECK_RE.match(input_line)
          if not m or m.group(1) not in prefix_set:
            output_lines.append(input_line)
            continue

        # Print out the various check lines here.
        common.add_analyze_checks(output_lines, ';', prefix_list, func_dict,
                                  func_name)
        is_in_function_start = False

      if is_in_function:
        if common.should_add_line_to_output(input_line, prefix_set):
          # This input line of the function body will go as-is into the output.
          # Except make leading whitespace uniform: 2 spaces.
          input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(
              r'  ', input_line)
          output_lines.append(input_line)
        else:
          continue
        if input_line.strip() == '}':
          is_in_function = False
          continue

      # Discard any previous script advertising.
      if input_line.startswith(ADVERT):
        continue

      # If it's outside a function, it just gets copied to the output.
      output_lines.append(input_line)

      m = IR_FUNCTION_RE.match(input_line)
      if not m:
        continue
      func_name = m.group(1)
      if args.function is not None and func_name != args.function:
        # When filtering on a specific function, skip all others.
        continue
      is_in_function = is_in_function_start = True

    if args.verbose:
      print('Writing %d lines to %s...' % (len(output_lines), test),
            file=sys.stderr)

    with open(test, 'wb') as f:
      f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
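# --- Illustrative aside (not part of the original script) --------------------
# The RUN-line handling above boils down to: split the line at the first '|',
# keep the tool half, and collect FileCheck prefixes from the check half.
# This standalone sketch shows that flow on a made-up RUN line.  The regex
# below is an assumption standing in for common.CHECK_PREFIX_RE, whose actual
# definition lives in UpdateTestChecks/common.py.
def _example_parse_run_line(run_line):
  import re
  tool_cmd, filecheck_cmd = [cmd.strip() for cmd in run_line.split('|', 1)]
  check_prefix_re = re.compile(r'--?check-prefix(?:es)?[= ](\S+)')  # assumed
  prefixes = [
      item for m in check_prefix_re.finditer(filecheck_cmd)
      for item in m.group(1).split(',')
  ]
  return tool_cmd, (prefixes or ['CHECK'])


# e.g. _example_parse_run_line(
#     'opt -instcombine -S < %s | FileCheck %s --check-prefixes=CHECK,FOO')
# returns ('opt -instcombine -S < %s', ['CHECK', 'FOO'])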
def main(): from argparse import RawTextHelpFormatter parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) parser.add_argument('--opt-binary', default='opt', help='The opt binary used to generate the test case') parser.add_argument('--function', help='The function in the test file to update') parser.add_argument('-p', '--preserve-names', action='store_true', help='Do not scrub IR names') parser.add_argument( '--function-signature', action='store_true', help='Keep function signature information around for the check line') parser.add_argument( '--scrub-attributes', action='store_true', help='Remove attribute annotations (#0) from the end of check line') parser.add_argument('--check-attributes', action='store_true', help='Check "Function Attributes" for functions') parser.add_argument('tests', nargs='+') initial_args = common.parse_commandline_args(parser) script_name = os.path.basename(__file__) opt_basename = os.path.basename(initial_args.opt_binary) if not re.match(r'^opt(-\d+)?$', opt_basename): common.error('Unexpected opt name: ' + opt_basename) sys.exit(1) opt_basename = 'opt' for ti in common.itertests(initial_args.tests, parser, script_name='utils/' + script_name): # If requested we scrub trailing attribute annotations, e.g., '#0', together with whitespaces if ti.args.scrub_attributes: common.SCRUB_TRAILING_WHITESPACE_TEST_RE = common.SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE else: common.SCRUB_TRAILING_WHITESPACE_TEST_RE = common.SCRUB_TRAILING_WHITESPACE_RE prefix_list = [] for l in ti.run_lines: if '|' not in l: common.warn('Skipping unparseable RUN line: ' + l) continue (tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split('|', 1)]) common.verify_filecheck_prefixes(filecheck_cmd) if not tool_cmd.startswith(opt_basename + ' '): common.warn('Skipping non-%s RUN line: %s' % (opt_basename, l)) continue if not filecheck_cmd.startswith('FileCheck '): common.warn('Skipping non-FileChecked RUN line: ' + l) continue tool_cmd_args = tool_cmd[len(opt_basename):].strip() tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip() check_prefixes = [ item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd) for item in m.group(1).split(',') ] if not check_prefixes: check_prefixes = ['CHECK'] # FIXME: We should use multiple check prefixes to common check lines. For # now, we just ignore all but the last. 
prefix_list.append((check_prefixes, tool_cmd_args)) global_vars_seen_dict = {} func_dict = {} for prefixes, _ in prefix_list: for prefix in prefixes: func_dict.update({prefix: dict()}) for prefixes, opt_args in prefix_list: common.debug('Extracted opt cmd: ' + opt_basename + ' ' + opt_args) common.debug('Extracted FileCheck prefixes: ' + str(prefixes)) raw_tool_output = common.invoke_tool(ti.args.opt_binary, opt_args, ti.path) common.build_function_body_dictionary(common.OPT_FUNCTION_RE, common.scrub_body, [], raw_tool_output, prefixes, func_dict, ti.args.verbose, ti.args.function_signature, ti.args.check_attributes) is_in_function = False is_in_function_start = False prefix_set = set( [prefix for prefixes, _ in prefix_list for prefix in prefixes]) common.debug('Rewriting FileCheck prefixes:', str(prefix_set)) output_lines = [] for input_line_info in ti.iterlines(output_lines): input_line = input_line_info.line args = input_line_info.args if is_in_function_start: if input_line == '': continue if input_line.lstrip().startswith(';'): m = common.CHECK_RE.match(input_line) if not m or m.group(1) not in prefix_set: output_lines.append(input_line) continue # Print out the various check lines here. common.add_ir_checks(output_lines, ';', prefix_list, func_dict, func_name, args.preserve_names, args.function_signature, global_vars_seen_dict) is_in_function_start = False if is_in_function: if common.should_add_line_to_output(input_line, prefix_set): # This input line of the function body will go as-is into the output. # Except make leading whitespace uniform: 2 spaces. input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub( r' ', input_line) output_lines.append(input_line) else: continue if input_line.strip() == '}': is_in_function = False continue # If it's outside a function, it just gets copied to the output. output_lines.append(input_line) m = common.IR_FUNCTION_RE.match(input_line) if not m: continue func_name = m.group(1) if args.function is not None and func_name != args.function: # When filtering on a specific function, skip all others. continue is_in_function = is_in_function_start = True common.debug('Writing %d lines to %s...' % (len(output_lines), ti.path)) with open(ti.path, 'wb') as f: f.writelines( ['{}\n'.format(l).encode('utf-8') for l in output_lines])
def main(): from argparse import RawTextHelpFormatter parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) parser.add_argument('-v', '--verbose', action='store_true', help='Show verbose output') parser.add_argument('--opt-binary', default='opt', help='The opt binary used to generate the test case') parser.add_argument('--function', help='The function in the test file to update') parser.add_argument('tests', nargs='+') args = parser.parse_args() autogenerated_note = (ADVERT + 'utils/' + os.path.basename(__file__)) opt_basename = os.path.basename(args.opt_binary) if (opt_basename != "opt"): print >> sys.stderr, 'ERROR: Unexpected opt name: ' + opt_basename sys.exit(1) for test in args.tests: if args.verbose: print >> sys.stderr, 'Scanning for RUN lines in test file: %s' % ( test, ) with open(test) as f: input_lines = [l.rstrip() for l in f] raw_lines = [ m.group(1) for m in [common.RUN_LINE_RE.match(l) for l in input_lines] if m ] run_lines = [raw_lines[0]] if len(raw_lines) > 0 else [] for l in raw_lines[1:]: if run_lines[-1].endswith("\\"): run_lines[-1] = run_lines[-1].rstrip("\\") + " " + l else: run_lines.append(l) if args.verbose: print >> sys.stderr, 'Found %d RUN lines:' % (len(run_lines), ) for l in run_lines: print >> sys.stderr, ' RUN: ' + l prefix_list = [] for l in run_lines: if '|' not in l: print >> sys.stderr, 'WARNING: Skipping non-FileChecked RUN line: ' + l continue (tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split('|', 1)]) if tool_cmd.startswith("%cheri_"): tool_cmd = tool_cmd.replace( "%cheri_purecap_opt", "opt -mtriple=cheri-unknown-freebsd -target-abi purecap -relocation-model pic -mcpu=cheri128 -mattr=+cheri128" ) tool_cmd = tool_cmd.replace( "%cheri_opt", "opt -mtriple=cheri-unknown-freebsd -mcpu=cheri128 -mattr=+cheri128" ) tool_cmd = tool_cmd.replace( "%cheri128_opt", "opt -mtriple=cheri-unknown-freebsd -mcpu=cheri128 -mattr=+cheri128" ) tool_cmd = tool_cmd.replace( "%cheri256_opt", "opt -mtriple=cheri-unknown-freebsd -mcpu=cheri256 -mattr=+cheri256" ) if not tool_cmd.startswith(opt_basename + ' '): print >> sys.stderr, 'WARNING: Skipping non-%s RUN line: %s' % ( opt_basename, l) continue if not filecheck_cmd.startswith('FileCheck '): print >> sys.stderr, 'WARNING: Skipping non-FileChecked RUN line: ' + l continue tool_cmd_args = tool_cmd[len(opt_basename):].strip() tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip() check_prefixes = [ item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd) for item in m.group(1).split(',') ] if not check_prefixes: check_prefixes = ['CHECK'] # FIXME: We should use multiple check prefixes to common check lines. For # now, we just ignore all but the last. 
prefix_list.append((check_prefixes, tool_cmd_args)) func_dict = {} for prefixes, _ in prefix_list: for prefix in prefixes: func_dict.update({prefix: dict()}) for prefixes, opt_args in prefix_list: if args.verbose: print >> sys.stderr, 'Extracted opt cmd: ' + opt_basename + ' ' + opt_args print >> sys.stderr, 'Extracted FileCheck prefixes: ' + str( prefixes) raw_tool_output = common.invoke_tool(args.opt_binary, opt_args, test) common.build_function_body_dictionary(common.OPT_FUNCTION_RE, common.scrub_body, [], raw_tool_output, prefixes, func_dict, args.verbose) is_in_function = False is_in_function_start = False prefix_set = set( [prefix for prefixes, _ in prefix_list for prefix in prefixes]) if args.verbose: print >> sys.stderr, 'Rewriting FileCheck prefixes: %s' % ( prefix_set, ) output_lines = [] output_lines.append(autogenerated_note) for input_line in input_lines: if is_in_function_start: if input_line == '': continue if input_line.lstrip().startswith(';'): m = common.CHECK_RE.match(input_line) if not m or m.group(1) not in prefix_set: output_lines.append(input_line) continue # Print out the various check lines here. common.add_ir_checks(output_lines, ';', prefix_list, func_dict, func_name) is_in_function_start = False if is_in_function: if common.should_add_line_to_output(input_line, prefix_set): # This input line of the function body will go as-is into the output. # Except make leading whitespace uniform: 2 spaces. input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub( r' ', input_line) output_lines.append(input_line) else: continue if input_line.strip() == '}': is_in_function = False continue # Discard any previous script advertising. if input_line.startswith(ADVERT): continue # If it's outside a function, it just gets copied to the output. output_lines.append(input_line) m = IR_FUNCTION_RE.match(input_line) if not m: continue func_name = m.group(1) if args.function is not None and func_name != args.function: # When filtering on a specific function, skip all others. continue is_in_function = is_in_function_start = True if args.verbose: print >> sys.stderr, 'Writing %d lines to %s...' % ( len(output_lines), test) with open(test, 'wb') as f: f.writelines([l + '\n' for l in output_lines])
def main(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( '--llc-binary', default=None, help='The "llc" binary to use to generate the test case') parser.add_argument( '--opt-binary', default='opt', help= 'The "opt" binary to use to generate the test case (if used for pre-processing)' ) parser.add_argument('--function', help='The function in the test file to update') parser.add_argument( '--extra_scrub', action='store_true', help= 'Always use additional regex to further reduce diffs between various subtargets' ) parser.add_argument( '--scrub-stack-indices', action='store_true', help= 'Use additional regex to further reduce diffs between 32/64-bit targets' ) parser.add_argument( '--x86_scrub_rip', action='store_true', default=True, help= 'Use more regex for x86 matching to reduce diffs between various subtargets' ) parser.add_argument('--no_x86_scrub_rip', action='store_false', dest='x86_scrub_rip') parser.add_argument('--no_x86_scrub_mem_shuffle', action='store_true', default=False, help='Reduce scrubbing shuffles with memory operands') parser.add_argument('tests', nargs='+') initial_args = common.parse_commandline_args(parser) script_name = os.path.basename(__file__) for ti in common.itertests(initial_args.tests, parser, script_name='utils/' + script_name): triple_in_ir = None for l in ti.input_lines: m = common.TRIPLE_IR_RE.match(l) if m: triple_in_ir = m.groups()[0] break run_list = [] for l in ti.run_lines: if '|' not in l: common.warn('Skipping unparseable RUN line: ' + l) continue commands = [cmd.strip() for cmd in l.split('|', 2)] preprocess_cmd = None # Allow pre-preocessing test inputs with sed, etc. if len(commands) == 3: # TODO: allow other tools first_command = commands[0] if first_command.startswith("%"): first_command = first_command.replace( "%cheri_purecap_opt", "opt -mtriple=mips64-unknown-freebsd -target-abi purecap -relocation-model pic -mcpu=cheri128 -mattr=+cheri128" ) first_command = first_command.replace( "%cheri128_purecap_opt", "opt -mtriple=mips64-unknown-freebsd -target-abi purecap -relocation-model pic -mcpu=cheri128 -mattr=+cheri128" ) first_command = first_command.replace( "%cheri256_purecap_opt", "opt -mtriple=mips64-unknown-freebsd -target-abi purecap -relocation-model pic -mcpu=cheri256 -mattr=+cheri256" ) first_command = first_command.replace( "%cheri_opt", "opt -mtriple=mips64-unknown-freebsd -mcpu=cheri128 -mattr=+cheri128" ) first_command = first_command.replace( "%cheri128_opt", "opt -mtriple=mips64-unknown-freebsd -mcpu=cheri128 -mattr=+cheri128" ) first_command = first_command.replace( "%cheri256_opt", "opt -mtriple=mips64-unknown-freebsd -mcpu=cheri256 -mattr=+cheri256" ) first_command = first_command.replace( "%riscv32_cheri_purecap_opt", "opt -mtriple=riscv32-unknown-freebsd -target-abi il32pc64 -mattr=+xcheri,+cap-mode" ) first_command = first_command.replace( "%riscv64_cheri_purecap_opt", "opt -mtriple=riscv64-unknown-freebsd -target-abi l64pc128 -mattr=+xcheri,+cap-mode" ) first_command = first_command.replace( "%riscv32_cheri_opt", "opt -mtriple=riscv32-unknown-freebsd -mattr=+xcheri") first_command = first_command.replace( "%riscv64_cheri_opt", "opt -mtriple=riscv64-unknown-freebsd -mattr=+xcheri") first_command_list = first_command.split() known_command = False if first_command_list[0] == "sed": known_command = True elif first_command_list[0] == "opt": known_command = True first_command_list[0] = ti.args.opt_binary first_command = " ".join(first_command_list) if not known_command: common.warn( 'WARNING: Skipping 
RUN line with more than two commands and unknown first tool: ' + l) continue # Handle known pre-processing command preprocess_cmd = first_command commands = commands[1:] llc_cmd = commands[0] if llc_cmd.startswith("%"): llc_cmd = llc_cmd.replace( "%cheri_purecap_llc", "llc -mtriple=mips64-unknown-freebsd -target-abi purecap -relocation-model pic -mcpu=cheri128 -mattr=+cheri128" ) llc_cmd = llc_cmd.replace( "%cheri128_purecap_llc", "llc -mtriple=mips64-unknown-freebsd -target-abi purecap -relocation-model pic -mcpu=cheri128 -mattr=+cheri128" ) llc_cmd = llc_cmd.replace( "%cheri256_purecap_llc", "llc -mtriple=mips64-unknown-freebsd -target-abi purecap -relocation-model pic -mcpu=cheri256 -mattr=+cheri256" ) llc_cmd = llc_cmd.replace( "%cheri_llc", "llc -mtriple=mips64-unknown-freebsd -mcpu=cheri128 -mattr=+cheri128" ) llc_cmd = llc_cmd.replace( "%cheri128_llc", "llc -mtriple=mips64-unknown-freebsd -mcpu=cheri128 -mattr=+cheri128" ) llc_cmd = llc_cmd.replace( "%cheri256_llc", "llc -mtriple=mips64-unknown-freebsd -mcpu=cheri256 -mattr=+cheri256" ) llc_cmd = llc_cmd.replace( "%riscv32_cheri_purecap_llc", "llc -mtriple=riscv32-unknown-freebsd -target-abi il32pc64 -mattr=+xcheri,+cap-mode" ) llc_cmd = llc_cmd.replace( "%riscv64_cheri_purecap_llc", "llc -mtriple=riscv64-unknown-freebsd -target-abi l64pc128 -mattr=+xcheri,+cap-mode" ) llc_cmd = llc_cmd.replace( "%riscv32_cheri_llc", "llc -mtriple=riscv32-unknown-freebsd -mattr=+xcheri") llc_cmd = llc_cmd.replace( "%riscv64_cheri_llc", "llc -mtriple=riscv64-unknown-freebsd -mattr=+xcheri") llc_tool = llc_cmd.split(' ')[0] triple_in_cmd = None m = common.TRIPLE_ARG_RE.search(llc_cmd) if m: triple_in_cmd = m.groups()[0] march_in_cmd = None m = common.MARCH_ARG_RE.search(llc_cmd) if m: march_in_cmd = m.groups()[0] filecheck_cmd = '' if len(commands) > 1: filecheck_cmd = commands[1] if filecheck_cmd.startswith("%cheri64_FileCheck"): filecheck_cmd = filecheck_cmd.replace( "%cheri64_FileCheck", "FileCheck '-D#CAP_SIZE=8'") elif filecheck_cmd.startswith("%cheri128_FileCheck"): filecheck_cmd = filecheck_cmd.replace( "%cheri128_FileCheck", "FileCheck '-D#CAP_SIZE=16'") elif filecheck_cmd.startswith("%cheri_FileCheck"): filecheck_cmd = filecheck_cmd.replace( "%cheri_FileCheck", "FileCheck '-D#CAP_SIZE=16'") common.verify_filecheck_prefixes(filecheck_cmd) if llc_tool not in LLC_LIKE_TOOLS: common.warn('Skipping non-llc RUN line: ' + l) continue if not filecheck_cmd.startswith('FileCheck '): common.warn('Skipping non-FileChecked RUN line: ' + l) continue llc_cmd_args = llc_cmd[len(llc_tool):].strip() llc_cmd_args = llc_cmd_args.replace('< %s', '').replace('%s', '').strip() if ti.path.endswith('.mir'): llc_cmd_args += ' -x mir' check_prefixes = [ item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd) for item in m.group(1).split(',') ] if not check_prefixes: check_prefixes = ['CHECK'] # FIXME: We should use multiple check prefixes to common check lines. For # now, we just ignore all but the last. 
run_list.append((check_prefixes, llc_cmd_args, triple_in_cmd, preprocess_cmd, march_in_cmd)) if ti.path.endswith('.mir'): check_indent = ' ' else: check_indent = '' func_dict = {} for p in run_list: prefixes = p[0] for prefix in prefixes: func_dict.update({prefix: dict()}) for prefixes, llc_args, triple_in_cmd, preprocess_cmd, march_in_cmd in run_list: common.debug('Extracted LLC cmd:', llc_tool, llc_args) common.debug('Extracted FileCheck prefixes:', str(prefixes)) if preprocess_cmd: common.debug('Extracted pre-processing command: ' + str(preprocess_cmd)) raw_tool_output = common.invoke_tool( ti.args.llc_binary or llc_tool, llc_args, ti.path, preprocess_cmd) triple = triple_in_cmd or triple_in_ir if not triple: triple = asm.get_triple_from_march(march_in_cmd) asm.build_function_body_dictionary_for_triple( ti.args, raw_tool_output, triple, prefixes, func_dict) is_in_function = False is_in_function_start = False func_name = None prefix_set = set([prefix for p in run_list for prefix in p[0]]) common.debug('Rewriting FileCheck prefixes:', str(prefix_set)) output_lines = [] for input_info in ti.iterlines(output_lines): input_line = input_info.line args = input_info.args if is_in_function_start: if input_line == '': continue if input_line.lstrip().startswith(';'): m = common.CHECK_RE.match(input_line) if not m or m.group(1) not in prefix_set: output_lines.append(input_line) continue # Print out the various check lines here. asm.add_asm_checks(output_lines, check_indent + ';', run_list, func_dict, func_name) is_in_function_start = False if is_in_function: if common.should_add_line_to_output(input_line, prefix_set): # This input line of the function body will go as-is into the output. output_lines.append(input_line) else: continue if input_line.strip() == '}': is_in_function = False continue # If it's outside a function, it just gets copied to the output. output_lines.append(input_line) m = common.IR_FUNCTION_RE.match(input_line) if not m: continue func_name = m.group(1) if args.function is not None and func_name != args.function: # When filtering on a specific function, skip all others. continue is_in_function = is_in_function_start = True common.debug('Writing %d lines to %s...' % (len(output_lines), ti.path)) with open(ti.path, 'wb') as f: f.writelines( ['{}\n'.format(l).encode('utf-8') for l in output_lines])
def main():
  from argparse import RawTextHelpFormatter
  parser = argparse.ArgumentParser(description=__doc__,
                                   formatter_class=RawTextHelpFormatter)
  parser.add_argument('--opt-binary', default='opt',
                      help='The opt binary used to generate the test case')
  parser.add_argument('--function',
                      help='The function in the test file to update')
  parser.add_argument('tests', nargs='+')
  initial_args = common.parse_commandline_args(parser)

  script_name = os.path.basename(__file__)
  opt_basename = os.path.basename(initial_args.opt_binary)
  if (opt_basename != "opt"):
    common.error('Unexpected opt name: ' + opt_basename)
    sys.exit(1)

  for ti in common.itertests(initial_args.tests, parser,
                             script_name='utils/' + script_name):
    triple_in_ir = None
    for l in ti.input_lines:
      m = common.TRIPLE_IR_RE.match(l)
      if m:
        triple_in_ir = m.groups()[0]
        break

    prefix_list = []
    for l in ti.run_lines:
      if '|' not in l:
        common.warn('Skipping unparseable RUN line: ' + l)
        continue

      (tool_cmd, filecheck_cmd) = tuple(
          [cmd.strip() for cmd in l.split('|', 1)])
      common.verify_filecheck_prefixes(filecheck_cmd)

      if not tool_cmd.startswith(opt_basename + ' '):
        common.warn('Skipping non-%s RUN line: %s' % (opt_basename, l))
        continue

      if not filecheck_cmd.startswith('FileCheck '):
        common.warn('Skipping non-FileChecked RUN line: ' + l)
        continue

      tool_cmd_args = tool_cmd[len(opt_basename):].strip()
      tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s',
                                                                '').strip()

      check_prefixes = [
          item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
          for item in m.group(1).split(',')
      ]
      if not check_prefixes:
        check_prefixes = ['CHECK']
      # FIXME: We should use multiple check prefixes to common check lines. For
      # now, we just ignore all but the last.
      prefix_list.append((check_prefixes, tool_cmd_args))

    builder = common.FunctionTestBuilder(
        run_list=prefix_list,
        flags=type('', (object, ), {
            'verbose': ti.args.verbose,
            'filters': ti.args.filters,
            'function_signature': False,
            'check_attributes': False,
            'replace_value_regex': []
        }),
        scrubber_args=[],
        path=ti.path)

    for prefixes, opt_args in prefix_list:
      common.debug('Extracted opt cmd:', opt_basename, opt_args,
                   file=sys.stderr)
      common.debug('Extracted FileCheck prefixes:', str(prefixes),
                   file=sys.stderr)

      raw_tool_outputs = common.invoke_tool(ti.args.opt_binary, opt_args,
                                            ti.path)

      # Split analysis outputs by "Printing analysis " declarations.
      for raw_tool_output in re.split(r'Printing analysis ',
                                      raw_tool_outputs):
        builder.process_run_line(common.ANALYZE_FUNCTION_RE,
                                 common.scrub_body, raw_tool_output, prefixes,
                                 False)

    func_dict = builder.finish_and_get_func_dict()
    is_in_function = False
    is_in_function_start = False
    prefix_set = set(
        [prefix for prefixes, _ in prefix_list for prefix in prefixes])
    common.debug('Rewriting FileCheck prefixes:', str(prefix_set),
                 file=sys.stderr)
    output_lines = []

    for input_info in ti.iterlines(output_lines):
      input_line = input_info.line
      args = input_info.args
      if is_in_function_start:
        if input_line == '':
          continue
        if input_line.lstrip().startswith(';'):
          m = common.CHECK_RE.match(input_line)
          if not m or m.group(1) not in prefix_set:
            output_lines.append(input_line)
            continue

        # Print out the various check lines here.
        common.add_analyze_checks(output_lines, ';', prefix_list, func_dict,
                                  func_name,
                                  is_filtered=builder.is_filtered())
        is_in_function_start = False

      if is_in_function:
        if common.should_add_line_to_output(input_line, prefix_set):
          # This input line of the function body will go as-is into the output.
          # Except make leading whitespace uniform: 2 spaces.
          input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(
              r'  ', input_line)
          output_lines.append(input_line)
        else:
          continue
        if input_line.strip() == '}':
          is_in_function = False
          continue

      # If it's outside a function, it just gets copied to the output.
      output_lines.append(input_line)

      m = common.IR_FUNCTION_RE.match(input_line)
      if not m:
        continue
      func_name = m.group(1)
      if ti.args.function is not None and func_name != ti.args.function:
        # When filtering on a specific function, skip all others.
        continue
      is_in_function = is_in_function_start = True

    common.debug('Writing %d lines to %s...' % (len(output_lines), ti.path))

    with open(ti.path, 'wb') as f:
      f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
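# --- Illustrative aside (not part of the original script) --------------------
# The FunctionTestBuilder call above fakes an argparse-style namespace with
# type('', (object,), {...}).  A minimal sketch of an equivalent, arguably
# more readable spelling using the standard library; the attribute names here
# simply mirror the ones passed above.
def _example_builder_flags(ti_args):
  import types
  return types.SimpleNamespace(verbose=ti_args.verbose,
                               filters=ti_args.filters,
                               function_signature=False,
                               check_attributes=False,
                               replace_value_regex=[])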
def main(): from argparse import RawTextHelpFormatter parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) parser.add_argument('--opt-binary', default='opt', help='The opt binary used to generate the test case') parser.add_argument('--function', help='The function in the test file to update') parser.add_argument('-p', '--preserve-names', action='store_true', help='Do not scrub IR names') parser.add_argument( '--function-signature', action='store_true', help='Keep function signature information around for the check line') parser.add_argument( '--scrub-attributes', action='store_true', help='Remove attribute annotations (#0) from the end of check line') parser.add_argument('--check-attributes', action='store_true', help='Check "Function Attributes" for functions') parser.add_argument( '--check-globals', action='store_true', help= 'Check global entries (global variables, metadata, attribute sets, ...) for functions' ) parser.add_argument('tests', nargs='+') initial_args = common.parse_commandline_args(parser) script_name = os.path.basename(__file__) opt_basename = os.path.basename(initial_args.opt_binary) if not re.match(r'^opt(-\d+)?(\.exe)?$', opt_basename): common.error('Unexpected opt name: ' + opt_basename) sys.exit(1) opt_basename = 'opt' for ti in common.itertests(initial_args.tests, parser, script_name='utils/' + script_name): # If requested we scrub trailing attribute annotations, e.g., '#0', together with whitespaces if ti.args.scrub_attributes: common.SCRUB_TRAILING_WHITESPACE_TEST_RE = common.SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE else: common.SCRUB_TRAILING_WHITESPACE_TEST_RE = common.SCRUB_TRAILING_WHITESPACE_RE prefix_list = [] for l in ti.run_lines: if '|' not in l: common.warn('Skipping unparseable RUN line: ' + l) continue commands = [cmd.strip() for cmd in l.split('|')] assert len(commands) >= 2 preprocess_cmd = None if len(commands) > 2: preprocess_cmd = " | ".join(commands[:-2]) tool_cmd = commands[-2] filecheck_cmd = commands[-1] common.verify_filecheck_prefixes(filecheck_cmd) if not tool_cmd.startswith(opt_basename + ' '): common.warn('Skipping non-%s RUN line: %s' % (opt_basename, l)) continue if not filecheck_cmd.startswith('FileCheck '): common.warn('Skipping non-FileChecked RUN line: ' + l) continue tool_cmd_args = tool_cmd[len(opt_basename):].strip() tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip() check_prefixes = [ item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd) for item in m.group(1).split(',') ] if not check_prefixes: check_prefixes = ['CHECK'] # FIXME: We should use multiple check prefixes to common check lines. For # now, we just ignore all but the last. 
prefix_list.append((check_prefixes, tool_cmd_args, preprocess_cmd)) global_vars_seen_dict = {} builder = common.FunctionTestBuilder(run_list=prefix_list, flags=ti.args, scrubber_args=[], path=ti.path) for prefixes, opt_args, preprocess_cmd in prefix_list: common.debug('Extracted opt cmd: ' + opt_basename + ' ' + opt_args) common.debug('Extracted FileCheck prefixes: ' + str(prefixes)) raw_tool_output = common.invoke_tool(ti.args.opt_binary, opt_args, ti.path, preprocess_cmd=preprocess_cmd, verbose=ti.args.verbose) builder.process_run_line(common.OPT_FUNCTION_RE, common.scrub_body, raw_tool_output, prefixes, False) builder.processed_prefixes(prefixes) func_dict = builder.finish_and_get_func_dict() is_in_function = False is_in_function_start = False has_checked_pre_function_globals = False prefix_set = set( [prefix for prefixes, _, _ in prefix_list for prefix in prefixes]) common.debug('Rewriting FileCheck prefixes:', str(prefix_set)) output_lines = [] include_generated_funcs = common.find_arg_in_test( ti, lambda args: ti.args.include_generated_funcs, '--include-generated-funcs', True) if include_generated_funcs: # Generate the appropriate checks for each function. We need to emit # these in the order according to the generated output so that CHECK-LABEL # works properly. func_order provides that. # We can't predict where various passes might insert functions so we can't # be sure the input function order is maintained. Therefore, first spit # out all the source lines. common.dump_input_lines(output_lines, ti, prefix_set, ';') args = ti.args if args.check_globals: common.add_global_checks(builder.global_var_dict(), ';', prefix_list, output_lines, global_vars_seen_dict, args.preserve_names, True) # Now generate all the checks. common.add_checks_at_end( output_lines, prefix_list, builder.func_order(), ';', lambda my_output_lines, prefixes, func: common.add_ir_checks( my_output_lines, ';', prefixes, func_dict, func, False, args.function_signature, global_vars_seen_dict, is_filtered=builder.is_filtered())) else: # "Normal" mode. for input_line_info in ti.iterlines(output_lines): input_line = input_line_info.line args = input_line_info.args if is_in_function_start: if input_line == '': continue if input_line.lstrip().startswith(';'): m = common.CHECK_RE.match(input_line) if not m or m.group(1) not in prefix_set: output_lines.append(input_line) continue # Print out the various check lines here. common.add_ir_checks(output_lines, ';', prefix_list, func_dict, func_name, args.preserve_names, args.function_signature, global_vars_seen_dict, is_filtered=builder.is_filtered()) is_in_function_start = False m = common.IR_FUNCTION_RE.match(input_line) if m and not has_checked_pre_function_globals: if args.check_globals: common.add_global_checks(builder.global_var_dict(), ';', prefix_list, output_lines, global_vars_seen_dict, args.preserve_names, True) has_checked_pre_function_globals = True if common.should_add_line_to_output(input_line, prefix_set, not is_in_function): # This input line of the function body will go as-is into the output. # Except make leading whitespace uniform: 2 spaces. input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub( r' ', input_line) output_lines.append(input_line) if input_line.strip() == '}': is_in_function = False continue if is_in_function: continue m = common.IR_FUNCTION_RE.match(input_line) if not m: continue func_name = m.group(1) if args.function is not None and func_name != args.function: # When filtering on a specific function, skip all others. 
continue is_in_function = is_in_function_start = True if args.check_globals: common.add_global_checks(builder.global_var_dict(), ';', prefix_list, output_lines, global_vars_seen_dict, args.preserve_names, False) common.debug('Writing %d lines to %s...' % (len(output_lines), ti.path)) with open(ti.path, 'wb') as f: f.writelines( ['{}\n'.format(l).encode('utf-8') for l in output_lines])
def main(): from argparse import RawTextHelpFormatter parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) parser.add_argument('-v', '--verbose', action='store_true', help='Show verbose output') parser.add_argument('--opt-binary', default='opt', help='The opt binary used to generate the test case') parser.add_argument( '--function', help='The function in the test file to update') parser.add_argument('tests', nargs='+') args = parser.parse_args() autogenerated_note = (ADVERT + 'utils/' + os.path.basename(__file__)) opt_basename = os.path.basename(args.opt_binary) if (opt_basename != "opt"): print >>sys.stderr, 'ERROR: Unexpected opt name: ' + opt_basename sys.exit(1) for test in args.tests: if args.verbose: print >>sys.stderr, 'Scanning for RUN lines in test file: %s' % (test,) with open(test) as f: input_lines = [l.rstrip() for l in f] raw_lines = [m.group(1) for m in [common.RUN_LINE_RE.match(l) for l in input_lines] if m] run_lines = [raw_lines[0]] if len(raw_lines) > 0 else [] for l in raw_lines[1:]: if run_lines[-1].endswith("\\"): run_lines[-1] = run_lines[-1].rstrip("\\") + " " + l else: run_lines.append(l) if args.verbose: print >>sys.stderr, 'Found %d RUN lines:' % (len(run_lines),) for l in run_lines: print >>sys.stderr, ' RUN: ' + l prefix_list = [] for l in run_lines: (tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split('|', 1)]) if not tool_cmd.startswith(opt_basename + ' '): print >>sys.stderr, 'WARNING: Skipping non-%s RUN line: %s' % (opt_basename, l) continue if not filecheck_cmd.startswith('FileCheck '): print >>sys.stderr, 'WARNING: Skipping non-FileChecked RUN line: ' + l continue tool_cmd_args = tool_cmd[len(opt_basename):].strip() tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip() check_prefixes = [item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd) for item in m.group(1).split(',')] if not check_prefixes: check_prefixes = ['CHECK'] # FIXME: We should use multiple check prefixes to common check lines. For # now, we just ignore all but the last. prefix_list.append((check_prefixes, tool_cmd_args)) func_dict = {} for prefixes, _ in prefix_list: for prefix in prefixes: func_dict.update({prefix: dict()}) for prefixes, opt_args in prefix_list: if args.verbose: print >>sys.stderr, 'Extracted opt cmd: ' + opt_basename + ' ' + opt_args print >>sys.stderr, 'Extracted FileCheck prefixes: ' + str(prefixes) raw_tool_outputs = common.invoke_tool(args.opt_binary, opt_args, test) # Split analysis outputs by "Printing analysis " declarations. for raw_tool_output in re.split(r'Printing analysis ', raw_tool_outputs): common.build_function_body_dictionary( common.ANALYZE_FUNCTION_RE, common.scrub_body, [], raw_tool_output, prefixes, func_dict, args.verbose) is_in_function = False is_in_function_start = False prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes]) if args.verbose: print >>sys.stderr, 'Rewriting FileCheck prefixes: %s' % (prefix_set,) output_lines = [] output_lines.append(autogenerated_note) for input_line in input_lines: if is_in_function_start: if input_line == '': continue if input_line.lstrip().startswith(';'): m = common.CHECK_RE.match(input_line) if not m or m.group(1) not in prefix_set: output_lines.append(input_line) continue # Print out the various check lines here. 
common.add_analyze_checks(output_lines, ';', prefix_list, func_dict, func_name) is_in_function_start = False if is_in_function: if common.should_add_line_to_output(input_line, prefix_set): # This input line of the function body will go as-is into the output. # Except make leading whitespace uniform: 2 spaces. input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(r' ', input_line) output_lines.append(input_line) else: continue if input_line.strip() == '}': is_in_function = False continue # Discard any previous script advertising. if input_line.startswith(ADVERT): continue # If it's outside a function, it just gets copied to the output. output_lines.append(input_line) m = IR_FUNCTION_RE.match(input_line) if not m: continue func_name = m.group(1) if args.function is not None and func_name != args.function: # When filtering on a specific function, skip all others. continue is_in_function = is_in_function_start = True if args.verbose: print>>sys.stderr, 'Writing %d lines to %s...' % (len(output_lines), test) with open(test, 'wb') as f: f.writelines([l + '\n' for l in output_lines])
def main(): from argparse import RawTextHelpFormatter parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) parser.add_argument('--opt-binary', default='opt', help='The opt binary used to generate the test case') parser.add_argument( '--function', help='The function in the test file to update') parser.add_argument('-p', '--preserve-names', action='store_true', help='Do not scrub IR names') parser.add_argument('--function-signature', action='store_true', help='Keep function signature information around for the check line') parser.add_argument('--scrub-attributes', action='store_true', help='Remove attribute annotations (#0) from the end of check line') parser.add_argument('--enable', action='store_true', dest='enabled', default=True, help='Activate CHECK line generation from this point forward') parser.add_argument('--disable', action='store_false', dest='enabled', help='Deactivate CHECK line generation from this point forward') parser.add_argument('tests', nargs='+') args = common.parse_commandline_args(parser) script_name = os.path.basename(__file__) autogenerated_note = (ADVERT + 'utils/' + script_name) opt_basename = os.path.basename(args.opt_binary) if not re.match(r'^opt(-\d+)?$', opt_basename): common.error('Unexpected opt name: ' + opt_basename) sys.exit(1) opt_basename = 'opt' for test in args.tests: if not glob.glob(test): common.warn("Test file pattern '%s' was not found. Ignoring it." % (test,)) continue # On Windows we must expand the patterns ourselves. test_paths = [test for pattern in args.tests for test in glob.glob(pattern)] for test in test_paths: argv = sys.argv[:] args = parser.parse_args() with open(test) as f: input_lines = [l.rstrip() for l in f] first_line = input_lines[0] if input_lines else "" if 'autogenerated' in first_line and script_name not in first_line: common.warn("Skipping test which wasn't autogenerated by " + script_name, test) continue if first_line and 'autogenerated' in first_line: args, argv = common.check_for_command(first_line, parser, args, argv) test_autogenerated_note = autogenerated_note + common.get_autogennote_suffix(parser, args) if args.update_only: if not first_line or 'autogenerated' not in first_line: common.warn("Skipping test which isn't autogenerated: " + test) continue run_lines = common.find_run_lines(test, input_lines) # If requested we scrub trailing attribute annotations, e.g., '#0', together with whitespaces if args.scrub_attributes: common.SCRUB_TRAILING_WHITESPACE_TEST_RE = common.SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE else: common.SCRUB_TRAILING_WHITESPACE_TEST_RE = common.SCRUB_TRAILING_WHITESPACE_RE prefix_list = [] for l in run_lines: if '|' not in l: common.warn('Skipping unparseable RUN line: ' + l) continue (tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split('|', 1)]) common.verify_filecheck_prefixes(filecheck_cmd) if not tool_cmd.startswith(opt_basename + ' '): common.warn('Skipping non-%s RUN line: %s' % (opt_basename, l)) continue if not filecheck_cmd.startswith('FileCheck '): common.warn('Skipping non-FileChecked RUN line: ' + l) continue tool_cmd_args = tool_cmd[len(opt_basename):].strip() tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip() check_prefixes = [item for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd) for item in m.group(1).split(',')] if not check_prefixes: check_prefixes = ['CHECK'] # FIXME: We should use multiple check prefixes to common check lines. For # now, we just ignore all but the last. 
prefix_list.append((check_prefixes, tool_cmd_args)) func_dict = {} for prefixes, _ in prefix_list: for prefix in prefixes: func_dict.update({prefix: dict()}) for prefixes, opt_args in prefix_list: common.debug('Extracted opt cmd: ' + opt_basename + ' ' + opt_args) common.debug('Extracted FileCheck prefixes: ' + str(prefixes)) raw_tool_output = common.invoke_tool(args.opt_binary, opt_args, test) common.build_function_body_dictionary( common.OPT_FUNCTION_RE, common.scrub_body, [], raw_tool_output, prefixes, func_dict, args.verbose, args.function_signature) is_in_function = False is_in_function_start = False prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes]) common.debug('Rewriting FileCheck prefixes:', str(prefix_set)) output_lines = [] output_lines.append(test_autogenerated_note) for input_line in input_lines: # Discard any previous script advertising. if input_line.startswith(ADVERT): continue args, argv = common.check_for_command(input_line, parser, args, argv) if not args.enabled: output_lines.append(input_line) continue if is_in_function_start: if input_line == '': continue if input_line.lstrip().startswith(';'): m = common.CHECK_RE.match(input_line) if not m or m.group(1) not in prefix_set: output_lines.append(input_line) continue # Print out the various check lines here. common.add_ir_checks(output_lines, ';', prefix_list, func_dict, func_name, args.preserve_names, args.function_signature) is_in_function_start = False if is_in_function: if common.should_add_line_to_output(input_line, prefix_set): # This input line of the function body will go as-is into the output. # Except make leading whitespace uniform: 2 spaces. input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(r' ', input_line) output_lines.append(input_line) else: continue if input_line.strip() == '}': is_in_function = False continue # If it's outside a function, it just gets copied to the output. output_lines.append(input_line) m = IR_FUNCTION_RE.match(input_line) if not m: continue func_name = m.group(1) if args.function is not None and func_name != args.function: # When filtering on a specific function, skip all others. continue is_in_function = is_in_function_start = True common.debug('Writing %d lines to %s...' % (len(output_lines), test)) with open(test, 'wb') as f: f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
def main():
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('-v', '--verbose', action='store_true',
                      help='Show verbose output')
  parser.add_argument('--llc-binary', default='llc',
                      help='The "llc" binary to use to generate the test case')
  parser.add_argument(
      '--function', help='The function in the test file to update')
  parser.add_argument(
      '--extra_scrub', action='store_true',
      help='Always use additional regex to further reduce diffs between various subtargets')
  parser.add_argument(
      '--x86_scrub_rip', action='store_true', default=True,
      help='Use more regex for x86 matching to reduce diffs between various subtargets')
  parser.add_argument(
      '--no_x86_scrub_rip', action='store_false', dest='x86_scrub_rip')
  parser.add_argument('tests', nargs='+')
  args = parser.parse_args()

  autogenerated_note = (ADVERT + 'utils/' + os.path.basename(__file__))

  for test in args.tests:
    if args.verbose:
      print('Scanning for RUN lines in test file: %s' % (test,), file=sys.stderr)
    with open(test) as f:
      input_lines = [l.rstrip() for l in f]

    triple_in_ir = None
    for l in input_lines:
      m = common.TRIPLE_IR_RE.match(l)
      if m:
        triple_in_ir = m.groups()[0]
        break

    raw_lines = [m.group(1)
                 for m in [common.RUN_LINE_RE.match(l) for l in input_lines] if m]
    run_lines = [raw_lines[0]] if len(raw_lines) > 0 else []
    for l in raw_lines[1:]:
      if run_lines[-1].endswith("\\"):
        run_lines[-1] = run_lines[-1].rstrip("\\") + " " + l
      else:
        run_lines.append(l)

    if args.verbose:
      print('Found %d RUN lines:' % (len(run_lines),), file=sys.stderr)
      for l in run_lines:
        print('  RUN: ' + l, file=sys.stderr)

    run_list = []
    for l in run_lines:
      commands = [cmd.strip() for cmd in l.split('|', 1)]
      llc_cmd = commands[0]
      triple_in_cmd = None
      m = common.TRIPLE_ARG_RE.search(llc_cmd)
      if m:
        triple_in_cmd = m.groups()[0]

      filecheck_cmd = ''
      if len(commands) > 1:
        filecheck_cmd = commands[1]

      if not llc_cmd.startswith('llc '):
        print('WARNING: Skipping non-llc RUN line: ' + l, file=sys.stderr)
        continue

      if not filecheck_cmd.startswith('FileCheck '):
        print('WARNING: Skipping non-FileChecked RUN line: ' + l, file=sys.stderr)
        continue

      llc_cmd_args = llc_cmd[len('llc'):].strip()
      llc_cmd_args = llc_cmd_args.replace('< %s', '').replace('%s', '').strip()

      check_prefixes = [item
                        for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
                        for item in m.group(1).split(',')]
      if not check_prefixes:
        check_prefixes = ['CHECK']

      # FIXME: We should use multiple check prefixes to common check lines. For
      # now, we just ignore all but the last.
      run_list.append((check_prefixes, llc_cmd_args, triple_in_cmd))

    func_dict = {}
    for p in run_list:
      prefixes = p[0]
      for prefix in prefixes:
        func_dict.update({prefix: dict()})
    for prefixes, llc_args, triple_in_cmd in run_list:
      if args.verbose:
        print('Extracted LLC cmd: llc ' + llc_args, file=sys.stderr)
        print('Extracted FileCheck prefixes: ' + str(prefixes), file=sys.stderr)

      raw_tool_output = common.invoke_tool(args.llc_binary, llc_args, test)
      if not (triple_in_cmd or triple_in_ir):
        print("Cannot find a triple. Assume 'x86'", file=sys.stderr)

      asm.build_function_body_dictionary_for_triple(
          args, raw_tool_output, triple_in_cmd or triple_in_ir or 'x86',
          prefixes, func_dict)

    is_in_function = False
    is_in_function_start = False
    func_name = None
    prefix_set = set([prefix for p in run_list for prefix in p[0]])
    if args.verbose:
      print('Rewriting FileCheck prefixes: %s' % (prefix_set,), file=sys.stderr)
    output_lines = []
    output_lines.append(autogenerated_note)

    for input_line in input_lines:
      if is_in_function_start:
        if input_line == '':
          continue
        if input_line.lstrip().startswith(';'):
          m = common.CHECK_RE.match(input_line)
          if not m or m.group(1) not in prefix_set:
            output_lines.append(input_line)
            continue

        # Print out the various check lines here.
        asm.add_asm_checks(output_lines, ';', run_list, func_dict, func_name)
        is_in_function_start = False

      if is_in_function:
        if common.should_add_line_to_output(input_line, prefix_set):
          # This input line of the function body will go as-is into the output.
          output_lines.append(input_line)
        else:
          continue
        if input_line.strip() == '}':
          is_in_function = False
          continue

      # Discard any previous script advertising.
      if input_line.startswith(ADVERT):
        continue

      # If it's outside a function, it just gets copied to the output.
      output_lines.append(input_line)

      m = common.IR_FUNCTION_RE.match(input_line)
      if not m:
        continue
      func_name = m.group(1)
      if args.function is not None and func_name != args.function:
        # When filtering on a specific function, skip all others.
        continue
      is_in_function = is_in_function_start = True

    if args.verbose:
      print('Writing %d lines to %s...' % (len(output_lines), test), file=sys.stderr)

    with open(test, 'wb') as f:
      f.writelines([l + '\n' for l in output_lines])
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--llc-binary', default=None,
                        help='The "llc" binary to use to generate the test case')
    parser.add_argument('--function',
                        help='The function in the test file to update')
    parser.add_argument(
        '--extra_scrub', action='store_true',
        help='Always use additional regex to further reduce diffs between various subtargets')
    parser.add_argument(
        '--x86_scrub_sp', action='store_true', default=True,
        help='Use regex for x86 sp matching to reduce diffs between various subtargets')
    parser.add_argument('--no_x86_scrub_sp', action='store_false',
                        dest='x86_scrub_sp')
    parser.add_argument(
        '--x86_scrub_rip', action='store_true', default=True,
        help='Use more regex for x86 rip matching to reduce diffs between various subtargets')
    parser.add_argument('--no_x86_scrub_rip', action='store_false',
                        dest='x86_scrub_rip')
    parser.add_argument('--no_x86_scrub_mem_shuffle', action='store_true',
                        default=False,
                        help='Reduce scrubbing shuffles with memory operands')
    parser.add_argument('tests', nargs='+')
    initial_args = common.parse_commandline_args(parser)

    script_name = os.path.basename(__file__)

    for ti in common.itertests(initial_args.tests, parser,
                               script_name='utils/' + script_name):
        triple_in_ir = None
        for l in ti.input_lines:
            m = common.TRIPLE_IR_RE.match(l)
            if m:
                triple_in_ir = m.groups()[0]
                break

        run_list = []
        for l in ti.run_lines:
            if '|' not in l:
                common.warn('Skipping unparseable RUN line: ' + l)
                continue

            commands = [cmd.strip() for cmd in l.split('|', 1)]
            llc_cmd = commands[0]
            llc_tool = llc_cmd.split(' ')[0]

            triple_in_cmd = None
            m = common.TRIPLE_ARG_RE.search(llc_cmd)
            if m:
                triple_in_cmd = m.groups()[0]

            march_in_cmd = None
            m = common.MARCH_ARG_RE.search(llc_cmd)
            if m:
                march_in_cmd = m.groups()[0]

            filecheck_cmd = ''
            if len(commands) > 1:
                filecheck_cmd = commands[1]
            common.verify_filecheck_prefixes(filecheck_cmd)

            if llc_tool not in LLC_LIKE_TOOLS:
                common.warn('Skipping non-llc RUN line: ' + l)
                continue

            if not filecheck_cmd.startswith('FileCheck '):
                common.warn('Skipping non-FileChecked RUN line: ' + l)
                continue

            llc_cmd_args = llc_cmd[len(llc_tool):].strip()
            llc_cmd_args = llc_cmd_args.replace('< %s', '').replace('%s', '').strip()
            if ti.path.endswith('.mir'):
                llc_cmd_args += ' -x mir'
            check_prefixes = [
                item
                for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
                for item in m.group(1).split(',')
            ]
            if not check_prefixes:
                check_prefixes = ['CHECK']

            # FIXME: We should use multiple check prefixes to common check
            # lines. For now, we just ignore all but the last.
            run_list.append(
                (check_prefixes, llc_cmd_args, triple_in_cmd, march_in_cmd))

        if ti.path.endswith('.mir'):
            check_indent = '  '
        else:
            check_indent = ''

        func_dict = {}
        func_order = {}
        for p in run_list:
            prefixes = p[0]
            for prefix in prefixes:
                func_dict.update({prefix: dict()})
                func_order.update({prefix: []})
        for prefixes, llc_args, triple_in_cmd, march_in_cmd in run_list:
            common.debug('Extracted LLC cmd:', llc_tool, llc_args)
            common.debug('Extracted FileCheck prefixes:', str(prefixes))

            raw_tool_output = common.invoke_tool(
                ti.args.llc_binary or llc_tool, llc_args, ti.path)
            triple = triple_in_cmd or triple_in_ir
            if not triple:
                triple = asm.get_triple_from_march(march_in_cmd)

            asm.build_function_body_dictionary_for_triple(
                ti.args, raw_tool_output, triple, prefixes, func_dict,
                func_order)

        is_in_function = False
        is_in_function_start = False
        func_name = None
        prefix_set = set([prefix for p in run_list for prefix in p[0]])
        common.debug('Rewriting FileCheck prefixes:', str(prefix_set))
        output_lines = []

        include_generated_funcs = common.find_arg_in_test(
            ti, lambda args: ti.args.include_generated_funcs,
            '--include-generated-funcs', True)

        if include_generated_funcs:
            # Generate the appropriate checks for each function.  We need to
            # emit these in the order according to the generated output so
            # that CHECK-LABEL works properly.  func_order provides that.

            # We can't predict where various passes might insert functions so
            # we can't be sure the input function order is maintained.
            # Therefore, first spit out all the source lines.
            common.dump_input_lines(output_lines, ti, prefix_set, ';')

            # Now generate all the checks.
            common.add_checks_at_end(
                output_lines, run_list, func_order, check_indent + ';',
                lambda my_output_lines, prefixes, func: asm.add_asm_checks(
                    my_output_lines, check_indent + ';', prefixes, func_dict,
                    func))
        else:
            for input_info in ti.iterlines(output_lines):
                input_line = input_info.line
                args = input_info.args
                if is_in_function_start:
                    if input_line == '':
                        continue
                    if input_line.lstrip().startswith(';'):
                        m = common.CHECK_RE.match(input_line)
                        if not m or m.group(1) not in prefix_set:
                            output_lines.append(input_line)
                            continue

                    # Print out the various check lines here.
                    asm.add_asm_checks(output_lines, check_indent + ';',
                                       run_list, func_dict, func_name)
                    is_in_function_start = False

                if is_in_function:
                    if common.should_add_line_to_output(input_line, prefix_set):
                        # This input line of the function body will go as-is
                        # into the output.
                        output_lines.append(input_line)
                    else:
                        continue
                    if input_line.strip() == '}':
                        is_in_function = False
                        continue

                # If it's outside a function, it just gets copied to the output.
                output_lines.append(input_line)

                m = common.IR_FUNCTION_RE.match(input_line)
                if not m:
                    continue
                func_name = m.group(1)
                if args.function is not None and func_name != args.function:
                    # When filtering on a specific function, skip all others.
                    continue
                is_in_function = is_in_function_start = True

        common.debug('Writing %d lines to %s...' % (len(output_lines), ti.path))

        with open(ti.path, 'wb') as f:
            f.writelines(
                ['{}\n'.format(l).encode('utf-8') for l in output_lines])
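# ---------------------------------------------------------------------------
# Illustrative sketch: the --include-generated-funcs path above in miniature.
# All original lines are kept first, then one CHECK block per function is
# appended in the order the tool emitted them (func_order), so CHECK-LABEL
# blocks stay in sync with the generated output even when passes add or
# reorder functions.  append_checks_at_end() and the toy data are
# hypothetical; the real logic lives in common.add_checks_at_end() used
# above, and the real func_dict is keyed by prefix first (here it is
# flattened to name -> body for brevity).
# ---------------------------------------------------------------------------

def append_checks_at_end(output_lines, prefix, func_order, func_dict,
                         comment=';'):
    """Append a CHECK-LABEL block per function, in generated-output order."""
    for func in func_order:
        body = func_dict.get(func)
        if body is None:
            continue
        output_lines.append('%s %s-LABEL: %s:' % (comment, prefix, func))
        for line in body.splitlines():
            output_lines.append('%s %s: %s' % (comment, prefix, line))
    return output_lines

# Example:
#   append_checks_at_end(['define i32 @f() { ... }'], 'CHECK',
#                        ['f'], {'f': 'ret i32 0'})
#   -> ['define i32 @f() { ... }', '; CHECK-LABEL: f:', '; CHECK: ret i32 0']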
def main():
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('-v', '--verbose', action='store_true',
                      help='Show verbose output')
  parser.add_argument('--llc-binary', default='llc',
                      help='The "llc" binary to use to generate the test case')
  parser.add_argument(
      '--function', help='The function in the test file to update')
  parser.add_argument(
      '--x86_extra_scrub', action='store_true',
      help='Use more regex for x86 matching to reduce diffs between various subtargets')
  parser.add_argument('tests', nargs='+')
  args = parser.parse_args()

  autogenerated_note = (ADVERT + 'utils/' + os.path.basename(__file__))

  for test in args.tests:
    if args.verbose:
      print >>sys.stderr, 'Scanning for RUN lines in test file: %s' % (test,)
    with open(test) as f:
      input_lines = [l.rstrip() for l in f]

    triple_in_ir = None
    for l in input_lines:
      m = common.TRIPLE_IR_RE.match(l)
      if m:
        triple_in_ir = m.groups()[0]
        break

    raw_lines = [m.group(1)
                 for m in [common.RUN_LINE_RE.match(l) for l in input_lines] if m]
    run_lines = [raw_lines[0]] if len(raw_lines) > 0 else []
    for l in raw_lines[1:]:
      if run_lines[-1].endswith("\\"):
        run_lines[-1] = run_lines[-1].rstrip("\\") + " " + l
      else:
        run_lines.append(l)

    if args.verbose:
      print >>sys.stderr, 'Found %d RUN lines:' % (len(run_lines),)
      for l in run_lines:
        print >>sys.stderr, '  RUN: ' + l

    run_list = []
    for l in run_lines:
      commands = [cmd.strip() for cmd in l.split('|', 1)]
      llc_cmd = commands[0]
      if llc_cmd.startswith("%cheri_"):
        llc_cmd = llc_cmd.replace("%cheri_purecap_llc", "llc -mtriple=cheri-unknown-freebsd -target-abi purecap -relocation-model pic -mcpu=cheri128 -mattr=+cheri128")
        llc_cmd = llc_cmd.replace("%cheri_llc", "llc -mtriple=cheri-unknown-freebsd -mcpu=cheri128 -mattr=+cheri128")
        llc_cmd = llc_cmd.replace("%cheri128_llc", "llc -mtriple=cheri-unknown-freebsd -mcpu=cheri128 -mattr=+cheri128")
        llc_cmd = llc_cmd.replace("%cheri256_llc", "llc -mtriple=cheri-unknown-freebsd -mcpu=cheri256 -mattr=+cheri256")
      triple_in_cmd = None
      m = common.TRIPLE_ARG_RE.search(llc_cmd)
      if m:
        triple_in_cmd = m.groups()[0]

      filecheck_cmd = ''
      if len(commands) > 1:
        filecheck_cmd = commands[1]
      if filecheck_cmd.startswith("%cheri_FileCheck"):
        filecheck_cmd = filecheck_cmd.replace("%cheri_FileCheck",
                                              "FileCheck '-D$CAP_SIZE=16'")

      if not llc_cmd.startswith('llc '):
        print >>sys.stderr, 'WARNING: Skipping non-llc RUN line: ' + l
        continue

      if not filecheck_cmd.startswith('FileCheck '):
        print >>sys.stderr, 'WARNING: Skipping non-FileChecked RUN line: ' + l
        continue

      llc_cmd_args = llc_cmd[len('llc'):].strip()
      llc_cmd_args = llc_cmd_args.replace('< %s', '').replace('%s', '').strip()

      check_prefixes = [item
                        for m in common.CHECK_PREFIX_RE.finditer(filecheck_cmd)
                        for item in m.group(1).split(',')]
      if not check_prefixes:
        check_prefixes = ['CHECK']

      # FIXME: We should use multiple check prefixes to common check lines. For
      # now, we just ignore all but the last.
      run_list.append((check_prefixes, llc_cmd_args, triple_in_cmd))

    func_dict = {}
    for p in run_list:
      prefixes = p[0]
      for prefix in prefixes:
        func_dict.update({prefix: dict()})
    for prefixes, llc_args, triple_in_cmd in run_list:
      if args.verbose:
        print >>sys.stderr, 'Extracted LLC cmd: llc ' + llc_args
        print >>sys.stderr, 'Extracted FileCheck prefixes: ' + str(prefixes)

      raw_tool_output = common.invoke_tool(args.llc_binary, llc_args, test)
      if not (triple_in_cmd or triple_in_ir):
        print >>sys.stderr, "Cannot find a triple. Assume 'x86'"

      asm.build_function_body_dictionary_for_triple(
          args, raw_tool_output, triple_in_cmd or triple_in_ir or 'x86',
          prefixes, func_dict)

    is_in_function = False
    is_in_function_start = False
    func_name = None
    prefix_set = set([prefix for p in run_list for prefix in p[0]])
    if args.verbose:
      print >>sys.stderr, 'Rewriting FileCheck prefixes: %s' % (prefix_set,)
    output_lines = []
    output_lines.append(autogenerated_note)

    for input_line in input_lines:
      if is_in_function_start:
        if input_line == '':
          continue
        if input_line.lstrip().startswith(';'):
          m = common.CHECK_RE.match(input_line)
          if not m or m.group(1) not in prefix_set:
            output_lines.append(input_line)
            continue

        # Print out the various check lines here.
        asm.add_asm_checks(output_lines, ';', run_list, func_dict, func_name)
        is_in_function_start = False

      if is_in_function:
        if common.should_add_line_to_output(input_line, prefix_set):
          # This input line of the function body will go as-is into the output.
          output_lines.append(input_line)
        else:
          continue
        if input_line.strip() == '}':
          is_in_function = False
          continue

      # Discard any previous script advertising.
      if input_line.startswith(ADVERT):
        continue

      # If it's outside a function, it just gets copied to the output.
      output_lines.append(input_line)

      m = common.IR_FUNCTION_RE.match(input_line)
      if not m:
        continue
      func_name = m.group(1)
      if args.function is not None and func_name != args.function:
        # When filtering on a specific function, skip all others.
        continue
      is_in_function = is_in_function_start = True

    if args.verbose:
      print >>sys.stderr, 'Writing %d lines to %s...' % (len(output_lines), test)

    with open(test, 'wb') as f:
      f.writelines([l + '\n' for l in output_lines])
def _get_block_infos(run_infos, test_path, args, common_prefix):  # noqa
  """ For each run line, run the tool with the specified args and collect the
      output. We use the concept of 'blocks' for uniquing, where a block is
      a series of lines of text with no more than one newline character between
      each one.  For example:

      This is
      one block

      This is
      another block

      This is yet another block

      We then build up a 'block_infos' structure containing a dict where the
      text of each block is the key and a list of the sets of prefixes that may
      generate that particular block.  This then goes through a series of
      transformations to minimise the number of CHECK lines that need to be
      written by taking advantage of common prefixes.
  """

  def _block_key(tool_args, prefixes):
    """ Get a hashable key based on the current tool_args and prefixes.
    """
    return ' '.join([tool_args] + prefixes)

  all_blocks = {}
  max_block_len = 0

  # A cache of the furthest-back position in any block list of the first
  # instance of each block, indexed by the block itself.
  farthest_indexes = defaultdict(int)

  # Run the tool for each run line to generate all of the blocks.
  for prefixes, tool_args in run_infos:
    key = _block_key(tool_args, prefixes)
    raw_tool_output = common.invoke_tool(args.llvm_mca_binary,
                                         tool_args,
                                         test_path)

    # Replace any lines consisting of purely whitespace with empty lines.
    raw_tool_output = '\n'.join(line if line.strip() else ''
                                for line in raw_tool_output.splitlines())

    # Split blocks, stripping all trailing whitespace, but keeping preceding
    # whitespace except for newlines so that columns will line up visually.
    all_blocks[key] = [b.lstrip('\n').rstrip()
                       for b in raw_tool_output.split('\n\n')]
    max_block_len = max(max_block_len, len(all_blocks[key]))

    # Attempt to align matching blocks until no more changes can be made.
    made_changes = True
    while made_changes:
      made_changes = _align_matching_blocks(all_blocks, farthest_indexes)

  # If necessary, pad the lists of blocks with empty blocks so that they are
  # all the same length.
  for key in all_blocks:
    len_to_pad = max_block_len - len(all_blocks[key])
    all_blocks[key] += [''] * len_to_pad

  # Create the block_infos structure where it is a nested dict in the form of:
  # block number -> block text -> list of prefix sets
  block_infos = defaultdict(lambda: defaultdict(list))
  for prefixes, tool_args in run_infos:
    key = _block_key(tool_args, prefixes)
    for block_num, block_text in enumerate(all_blocks[key]):
      block_infos[block_num][block_text].append(set(prefixes))

  # Now go through the block_infos structure and attempt to smartly prune the
  # number of prefixes per block to the minimal set possible to output.
  for block_num in range(len(block_infos)):
    # When there are multiple block texts for a block num, remove any
    # prefixes that are common to more than one of them.
    # E.g. [ [{ALL,FOO}] , [{ALL,BAR}] ] -> [ [{FOO}] , [{BAR}] ]
    all_sets = [s for s in block_infos[block_num].values()]
    pruned_sets = []

    for i, setlist in enumerate(all_sets):
      other_set_values = set([elem for j, setlist2 in enumerate(all_sets)
                              for set_ in setlist2
                              for elem in set_
                              if i != j])
      pruned_sets.append([s - other_set_values for s in setlist])

    for i, block_text in enumerate(block_infos[block_num]):
      # When a block text matches multiple sets of prefixes, try removing any
      # prefixes that aren't common to all of them.
      # E.g. [ {ALL,FOO} , {ALL,BAR} ] -> [{ALL}]
      common_values = set.intersection(*pruned_sets[i])
      if common_values:
        pruned_sets[i] = [common_values]

      # Everything should be uniqued as much as possible by now.  Apply the
      # newly pruned sets to the block_infos structure.
      # If there are any blocks of text that still match multiple prefixes,
      # output a warning.
      current_set = set()
      for s in pruned_sets[i]:
        s = sorted(list(s))
        if s:
          current_set.add(s[0])
          if len(s) > 1:
            _warn('Multiple prefixes generating same output: {} '
                  '(discarding {})'.format(','.join(s), ','.join(s[1:])))

      if block_text and not current_set:
        raise Error(
            'block not captured by existing prefixes:\n\n{}'.format(block_text))
      block_infos[block_num][block_text] = sorted(list(current_set))

    # If we have multiple block_texts, try to break them down further to avoid
    # the case where we have very similar block_texts repeated after each
    # other.
    if common_prefix and len(block_infos[block_num]) > 1:
      # We'll only attempt this if each of the block_texts has the same number
      # of lines as the others.
      same_num_Lines = (len(set(len(k.splitlines())
                                for k in block_infos[block_num].keys())) == 1)
      if same_num_Lines:
        breakdown = _break_down_block(block_infos[block_num], common_prefix)
        if breakdown:
          block_infos[block_num] = breakdown

  return block_infos
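# ---------------------------------------------------------------------------
# Illustrative sketch: the first pruning step above on toy data.  Given one
# block number whose two block texts were produced under the prefix sets
# {ALL, FOO} and {ALL, BAR}, the shared prefix ALL cannot distinguish the two
# texts, so it is dropped and each text keeps only its unique prefix.
# prune_prefixes() is a hypothetical condensation of the loop above, not the
# function itself.
# ---------------------------------------------------------------------------

def prune_prefixes(sets_per_text):
  """For each block text, drop prefixes that also appear for another text."""
  pruned = []
  for i, setlist in enumerate(sets_per_text):
    others = set(elem
                 for j, setlist2 in enumerate(sets_per_text)
                 for set_ in setlist2
                 for elem in set_
                 if i != j)
    pruned.append([s - others for s in setlist])
  return pruned

# Example (mirrors the "E.g." comment in the function above):
#   prune_prefixes([[{'ALL', 'FOO'}], [{'ALL', 'BAR'}]])
#   -> [[{'FOO'}], [{'BAR'}]]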