def do_file(self, args, opts=None): """ Load an ARM binary file for emulation and debugging. To list ARMjitsu supported binary formats invoke: (armjitsu) file --list """ BIN_TYPE = armcpu_const.RAW_BIN if opts.raw: BIN_TYPE = armcpu_const.RAW_BIN elif opts.elf: BIN_TYPE = armcpu_const.ELF_BIN line = args if not line: print colorful.yellow("Supply the filename of the binary you wish to load please.") return None file_name = line if is_file(line) else None if not file_name or not BIN_TYPE: print colorful.yellow("Error with supplied filename.") return False self.arm_dbg = armcpu.ArmCPU(file_name, BIN_TYPE) self.bin_loaded = True print colorful.base1("Loaded binary file: {}".format(file_name))
def find_param_dupls(src_file):
    """Find duplicate shared parameters in *src_file* and report them.

    Reads the module-level docopt ``args`` dict for CLI options
    ('--byname', '--sortby').
    """
    # '--byname' switches duplicate detection from guid-based to name-based.
    byname = args['--byname']
    spentries = rsparam.find_duplicates(src_file, byname=byname)
    # write output to file if requested
    out_file = check_write_results(spentries.params)
    if out_file:
        report_filenames(out_file, title='wrote results to: ')
        return
    duplparam = 'name' if byname else 'guid'
    dupldata = []
    report(colorful.yellow('\nduplicate params by {}:'.format(duplparam)))
    # flatten the list-of-duplicate-lists into rows for tabulation;
    # the first two columns swap depending on the match key
    for dlist in spentries.params:
        for d in dlist:
            dupldata.append(
                (d.name if byname else d.guid,
                 d.guid if byname else d.name,
                 d.datatype, d.group, d.lineno))
    # NOTE(review): only the first duplicate's key is echoed here —
    # presumably a section header; confirm this is intentional.
    print(
        colorful.yellow('\nduplicates by {}: {}'.format(
            duplparam, dupldata[0][0])))
    if args['--sortby'] == 'group':
        dupldata = sorted(dupldata, key=lambda x: str(x[3]))
    print(
        tabulate(dupldata,
                 headers=('Name' if byname else 'Guid',
                          'Guid' if byname else 'Name',
                          'Datatype', 'Group', 'Line #')))
def do_run(self, line): """Begins execution of ARM binary.""" if not self.bin_running: self.bin_running = True else: print colorful.yellow("Process is already running.") return None self.arm_dbg.start_execution()
def _(value: Array, seen: Set[int], show_values: bool = True, indent: int = 0,
      batch_dims: Optional[Tuple[Optional[int], ...]] = None) -> str:
    """Render a header line for a NumPy array followed by its contents."""
    header = cf.yellow(
        f"NumPy Array {value.shape} {value.dtype}{_batch_str(batch_dims)}")
    body = _show_array(indent + 1, value)
    return header + "\n" + body
def show():
    """ Show the modifiers and colors """
    # modifiers
    modifiers = ('bold', 'dimmed', 'italic', 'underlined', 'inversed',
                 'concealed', 'struckthrough')
    for name in modifiers[:-1]:
        sys.stdout.write(getattr(colorful, name)(name) + ' ')
    sys.stdout.write(colorful.struckthrough('struckthrough') + '\n')

    # foreground colors
    colors = ('red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
    for name in colors[:-1]:
        sys.stdout.write(getattr(colorful, name)(name) + ' ')
    sys.stdout.write(colorful.white('white') + '\n')

    # background colors (same palette, 'on_' prefixed style methods)
    for name in colors[:-1]:
        sys.stdout.write(getattr(colorful, 'on_' + name)(name) + ' ')
    sys.stdout.write(colorful.on_white('white') + '\n')
def test_step_not_match(sentence, expected_not_matching_step, steps):
    """Check that *sentence* does NOT match *expected_not_matching_step*.

    An empty expectation means the sentence must not match ANY step.
    Returns True on success, False when an unwanted match occurred.
    """
    if expected_not_matching_step:
        step_to_print = colorful.cyan(expected_not_matching_step)
    else:
        step_to_print = "ANY"
    sys.stdout.write(
        '{0} STEP "{1}" SHOULD NOT MATCH {2} '.format(
            colorful.yellow(">>"), colorful.cyan(sentence), step_to_print
        )
    )

    result = match_step(sentence, steps)
    if result:
        matched_name = result.func.__name__
        # fail when any match is forbidden, or the forbidden step matched
        if not expected_not_matching_step or matched_name == expected_not_matching_step:
            output_failure(
                None,
                [
                    "Expected sentence did match {0} but it shouldn't".format(
                        expected_not_matching_step
                    )
                ],
            )
            return False

    print(u(colorful.bold_green("✔")))
    return True
def meta(item):
    """Return a one-line colored metadata summary for a filesystem *item*.

    Shows byte size, optional JSON-wrapped file contents, and creation time.
    Falls back to a yellow warning string on any read/stat failure.
    """
    try:
        item_stats = os.stat(item.resolved_path)
        item_byte_size = item_stats[stat.ST_SIZE]
        item_created_at_ms = item_stats[stat.ST_CTIME]
        item_created_at = time.strftime(
            '%a %b %d %Y %H:%M:%S GMT%z (%Z)',
            time.localtime(item_created_at_ms))
        item_data = None
        if item.is_file:
            # BUG FIX: the original left the file handle open
            # (open(...).read()); close it deterministically.
            with open(item.resolved_path, 'r') as fh:
                item_data = fh.read()
        return ''.join([
            str(color.darkGray('- ')),
            ' '.join([
                str(
                    color.white_on_darkMagenta(
                        ' {0} bytes '.format(item_byte_size))),
                str(item_data and json.dumps({'data': item_data}) or ''),
                str(color.darkCyan('{0}'.format(item_created_at))),
            ])
        ])
    except Exception:
        # best-effort display: unreadable items get a placeholder
        return color.yellow('(!) could not read/resolve')
def colored_grade(grade):
    """Colorize *grade*: red below 4, yellow below 4.5, green otherwise."""
    FAIL_LIMIT = 4
    WARN_LIMIT = 4.5
    if grade < FAIL_LIMIT:
        return colorful.red(grade)
    if grade < WARN_LIMIT:
        return colorful.yellow(grade)
    return colorful.green(grade)
def first_run_tui():
    '''
    First-run setup: asks the user for a password and creates the
    symmetric key file used for encryption.
    '''
    import os
    # platform-specific bootstrap; only Windows currently does anything
    if (platform.system() == "Darwin"):
        pass
    if (platform.system() == "Windows"):
        # NOTE(review): installs dependencies at runtime via pip —
        # a heavyweight side effect; confirm this is intended.
        os.system("pip install -r requirements.txt")
        # pass
    if (platform.system() == "Linux"):
        pass
    from cryptography.fernet import Fernet
    # banner art for the setup screen
    cryptocut_art = text2art("cRYPTOcUT", "confused3")
    setup_art = text2art("setup")
    print('\n', cf.green(cryptocut_art))
    print(cf.cyan(setup_art))
    user_pass = input(
        cf.yellow("enter your password for symmetric encryption : "))
    # derive a Fernet-compatible key from the user's password
    user_key = (create_user_key(user_pass))
    # generate a random secret for user using crypto lib
    fernet = Fernet(user_key)
    random_key = Fernet.generate_key()
    # the random secret is stored encrypted under the password-derived key
    symmetric_key = fernet.encrypt(random_key)
    # write the random secret as symmetric_key in symmetric.key
    # TODO : we can use a diffrent path for this
    with open("symmetric.key", "wb") as key_file:
        key_file.write(symmetric_key)
    print(cf.green("done! \nsuccessfully initialized the symmetric key"))
def test_step_match(sentence, expected_step, expected_arguments, steps):
    """Check that *sentence* matches *expected_step* with *expected_arguments*.

    Returns True when the match and all argument checks succeed.
    """
    sys.stdout.write('{0} STEP "{1}" SHOULD MATCH {2} '.format(
        colorful.yellow('>>'), colorful.cyan(sentence),
        colorful.cyan(expected_step)))

    result = match_step(sentence, steps)
    if not result:
        output_failure(
            None, ['Expected sentence didn\'t match any step implementation'])
        return False

    matched = result.func.__name__
    if matched != expected_step:
        output_failure(result.func, [
            'Expected sentence matched {0} instead of {1}'.format(
                matched, expected_step)
        ])
        return False

    if expected_arguments:
        arguments = merge_step_args(result)
        # flatten the list of single-entry dicts into one expectation dict
        merged_expected = {}
        for chunk in expected_arguments:
            merged_expected.update(chunk)
        argument_errors = check_step_arguments(merged_expected, arguments)
        if argument_errors:
            output_failure(result.func, argument_errors)
            return False

    print(u(colorful.bold_green(u'✔')))
    return True
def test_step_matches(match_config, steps):
    """
    Test if the given match config matches the actual
    matched step implementations.

    Returns a (failed, passed) tuple of sentence counts.
    """
    failed = 0
    passed = 0

    for item in match_config:
        validate_config_item(item)
        sentence = item['sentence']
        expected_step = item['should_match']
        sys.stdout.write('{0} STEP "{1}" SHOULD MATCH {2} '.format(
            colorful.yellow('>>'), colorful.cyan(sentence),
            colorful.cyan(expected_step)))

        result = match_step(item['sentence'], steps)
        if not result:
            # nothing matched at all
            output_failure(
                None,
                ['Expected sentence didn\'t match any step implementation'])
            failed += 1
            continue

        if expected_step != result.func.__name__:
            # matched, but the wrong step implementation
            output_failure(result.func, [
                'Expected sentence matched {0} instead of {1}'.format(
                    result.func.__name__, expected_step)
            ])
            failed += 1
            continue

        expected_arguments = item.get('with_arguments')
        if expected_arguments:
            arguments = merge_step_args(result)
            # flatten the YAML list of single-entry dicts into one dict
            expected_arguments = {
                k: v
                for expected_arguments in expected_arguments
                for k, v in expected_arguments.items()
            }
            argument_errors = check_step_arguments(expected_arguments,
                                                   arguments)
            if argument_errors:
                output_failure(result.func, argument_errors)
                failed += 1
                continue

        # check if arguments match
        print(u(colorful.bold_green(u'✔')))
        passed += 1
    return failed, passed
def write_stdout(level, message):
    """Write an INFO/WARNING *message* to the console with tree-style framing.

    Multi-line messages get a continuation prefix aligned under the level tag.
    """
    if level == 'WARNING':
        prefix = colorful.bold_red(u'\u2757 WARNING :')
        message = colorful.yellow(message)
    else:
        prefix = colorful.bold_yellow(u'\u229b INFO :')
    # continuation lines are indented past the vertical bar by the prefix width
    added_prefix = u'\n\t\t{}\t{} '.format(colorful.gray(u'\u2502'),
                                           ' ' * len(prefix))
    lines = message.split('\n')
    console_write(u'\t\t\u251c\u2501\t{} {}'.format(prefix,
                                                    added_prefix.join(lines)))
def find_matching(src_file):
    """Search *src_file* for groups/params matching the CLI regex pattern.

    Reads the module-level docopt ``args`` dict for options
    ('<regex_pattern>', '--encode', '--params', '--groups').
    """
    search_str = args['<regex_pattern>']
    spentries = rsparam.find(src_file, search_str, encoding=args['--encode'])
    # write output to file if requested
    out_entries = []
    if not args['--params']:
        out_entries.extend(spentries.groups)
    if not args['--groups']:
        out_entries.extend(spentries.params)
    out_file = check_write_results(out_entries)
    if out_file:
        report_filenames(out_file, title='wrote results to: ')
        return
    # otherwise print the matches to the console, honoring the filters
    if spentries.groups and not args['--params']:
        report(colorful.yellow('\ngroups matching: {}'.format(search_str)))
        list_groups(None, spgroups=spentries.groups)
    if spentries.params and not args['--groups']:
        report(colorful.yellow('\nparams matching: {}'.format(search_str)))
        list_params(None, sparams=spentries.params)
def comp(first_file, second_file): uniq1, uniq2 = rsparam.compare(first_file, second_file, encoding=args['--encode']) # write output to files if requested if uniq1.groups and not args['--params'] and not args['--second']: report(colorful.yellow('\nunique groups in first')) args['--output'] = 'uniq_groups_1.txt' if args['--OUTPUT'] else None list_groups(None, spgroups=uniq1.groups) if uniq2.groups and not args['--params'] and not args['--first']: report(colorful.yellow('\nunique groups in second')) args['--output'] = 'uniq_groups_2.txt' if args['--OUTPUT'] else None list_groups(None, spgroups=uniq2.groups) if uniq1.params and not args['--groups'] and not args['--second']: report(colorful.yellow('\nunique parameters in first')) args['--output'] = 'uniq_params_1.txt' if args['--OUTPUT'] else None list_params(None, sparams=uniq1.params) if uniq2.params and not args['--groups'] and not args['--first']: report(colorful.yellow('\nunique parameters in second')) args['--output'] = 'uniq_params_2.txt' if args['--OUTPUT'] else None list_params(None, sparams=uniq2.params)
def test_step_not_match(sentence, expected_not_matching_step, steps):
    """Assert *sentence* does not match *expected_not_matching_step*.

    An empty expectation means no step at all may match. Returns True on
    success, False when an unwanted match occurred.
    """
    step_to_print = 'ANY'
    if expected_not_matching_step:
        step_to_print = colorful.cyan(expected_not_matching_step)
    sys.stdout.write('{0} STEP "{1}" SHOULD NOT MATCH {2} '.format(
        colorful.yellow('>>'), colorful.cyan(sentence), step_to_print))

    result = match_step(sentence, steps)
    if result:
        forbidden_match = (not expected_not_matching_step
                           or result.func.__name__ == expected_not_matching_step)
        if forbidden_match:
            output_failure(None, [
                'Expected sentence did match {0} but it shouldn\'t'.format(
                    expected_not_matching_step)
            ])
            return False

    print(u(colorful.bold_green(u'✔')))
    return True
def test_step_not_match(sentence, expected_not_matching_step, steps):
    """Verify *sentence* does NOT match *expected_not_matching_step*.

    An empty expectation means the sentence must not match any step.
    Returns True on success, False on an unwanted match.
    """
    # 'ANY' marks the no-step-may-match case in the console output
    step_to_print = (colorful.cyan(expected_not_matching_step)
                     if expected_not_matching_step else "ANY")
    sys.stdout.write('{0} STEP "{1}" SHOULD NOT MATCH {2} '.format(
        colorful.yellow(">>"), colorful.cyan(sentence), step_to_print))

    result = match_step(sentence, steps)
    if result:
        # failure when any match is forbidden, or exactly the forbidden
        # step implementation matched
        if (not expected_not_matching_step
                or result.func.__name__ == expected_not_matching_step):
            output_failure(
                None,
                [
                    "Expected sentence did match {0} but it shouldn't".format(
                        expected_not_matching_step)
                ],
            )
            return False

    print(u(colorful.bold_green("✔")))
    return True
def test_step_match(sentence, expected_step, expected_arguments, steps):
    """Verify *sentence* matches *expected_step* with *expected_arguments*.

    Returns True when the match and all argument checks succeed.
    """
    sys.stdout.write(
        '{0} STEP "{1}" SHOULD MATCH {2} '.format(
            colorful.yellow(">>"), colorful.cyan(sentence),
            colorful.cyan(expected_step)
        )
    )

    result = match_step(sentence, steps)
    if not result:
        # nothing matched at all
        output_failure(None, ["Expected sentence didn't match any step implementation"])
        return False

    if expected_step != result.func.__name__:
        # matched, but the wrong step implementation
        output_failure(
            result.func,
            [
                "Expected sentence matched {0} instead of {1}".format(
                    result.func.__name__, expected_step
                )
            ],
        )
        return False

    if expected_arguments:
        arguments = merge_step_args(result)
        # flatten the list of single-entry dicts into one expectation dict
        expected_arguments = {
            k: v
            for expected_arguments in expected_arguments
            for k, v in expected_arguments.items()
        }
        argument_errors = check_step_arguments(expected_arguments, arguments)
        if argument_errors:
            output_failure(result.func, argument_errors)
            return False

    print(u(colorful.bold_green("✔")))
    return True
# --- script fragment: solve a word-puzzle grid and draw the results ---
# NOTE(review): free variables (english_words, depth, source, testing_mode,
# grid, words_found, populate, search, draw) are defined elsewhere — fragment
# of a larger script.
cf.use_style('solarized')
print(cf.bold_yellow(f'Dict size: {len(english_words)}'))
print('depth is', depth)
print('source is ', source)
print('testing mode is', testing_mode)
if not grid:
    if not testing_mode:
        print(
            cf.red_bold(
                'Failed to recognize the grid. Try again on a different puzzle'
            ))
        import sys
        sys.exit(1)
    # in testing mode, fall back to a generated grid
    grid = populate()
print("Grid is")
print(np.asarray(grid))
# TODO: populate grid
# TODO: find the pixel values of each grid value (could be manual)
print(pyautogui.size())
search(grid, depth)
print(
    cf.green(
        f'we have found {len(words_found)} words... running draw algorithm'
    ))
# longest words first
words_found = sorted(words_found, key=lambda k: -len(k[0]))
# max score?
if testing_mode or source == 'generated':
    print(cf.yellow(f'{words_found}'))
if not (source == 'generated'):
    draw(words_found)
# check privacy settings before drawing
# Scan codebase for violations violations_found = False for file_path in args.files: with open(file_path) as file: for line_number, predicate in match_predicates(file): actual_arity = len(predicate.arguments) try: expected_arity = len(predicate_signatures[predicate.name]) assert expected_arity == actual_arity except KeyError: # Missing annotation predicate_signature = colorful.bold_yellow( f'{predicate.name}/{actual_arity}') print_message(colorful.yellow( f'Missing annotation for {predicate_signature}'), file_name=file_path, line_number=line_number) violations_found = True except AssertionError: # Annotation violation actual_signature = colorful.bold_red( f'{predicate.name}/{actual_arity}') expected_signature = colorful.bold_red( f'{predicate.name}/{expected_arity}') print_message(colorful.red( f'{actual_signature} should be {expected_signature}'), file_name=file_path, line_number=line_number) violations_found = True
def style_warn(text):
    """Return *text* rendered as a yellow warning (nestable inside other styles)."""
    return colorful.yellow('{}'.format(text), nested=True)
print(cf.red('red' + cf.white(' white ', nested=True) + 'red')) # combine styles with strings print(cf.bold & cf.red | 'Hello World') # use true colors cf.use_true_colors() # extend default color palette cf.update_palette({'mint': '#c5e8c8'}) print(cf.mint_on_snow('Wow, this is actually mint')) # choose a predefined style cf.use_style('solarized') # print the official solarized colors print(cf.yellow('yellow'), cf.orange('orange'), cf.red('red'), cf.magenta('magenta'), cf.violet('violet'), cf.blue('blue'), cf.cyan('cyan'), cf.green('green')) # directly print with colors cf.print('{c.bold_blue}Hello World{c.reset}') # choose specific color mode for one block with cf.with_8_ansi_colors() as c: print(c.bold_green('colorful is awesome!')) # create and choose your own color palette MY_COMPANY_PALETTE = { 'companyOrange': '#f4b942', 'companyBaige': '#e8dcc5'
def warn(self, text):
    """Print '<name> warn <text>' (magenta name, yellow tag) unless silenced."""
    if self.silent:
        return
    label = colorful.magenta(self.name) + " " + colorful.yellow("warn")
    print(label + " " + str(text))
def warning(self, msg, *args, **kwargs):
    """Prints a formatted warning message.

    For arguments, see `_format_msg`.
    """
    styled = cf.yellow(msg)
    self.print(styled, *args, **kwargs)
def test_step_matches_configs(
    match_config_files, basedirs, cover_min_percentage=None, cover_show_missing=False
):
    """
    Test if the given match config files matches the actual
    matched step implementations.

    Returns an exit code: 0 all passed, 1 failures, 2 coverage too low,
    3 bad coverage argument, 4 no steps found, 5 empty config file.
    """
    # a minimum coverage above 100% can never be reached — reject early
    if cover_min_percentage is not None and float(cover_min_percentage) > 100:
        sys.stderr.write(
            str(
                colorful.magenta(
                    "You are a little cocky to think you can reach a minimum coverage of {0:.2f}%\n".format(
                        float(cover_min_percentage)
                    )
                )
            )
        )
        return 3

    # load user's custom python files
    for basedir in basedirs:
        load_modules(basedir)

    steps = StepRegistry().steps

    if not steps:
        sys.stderr.write(
            str(
                colorful.magenta(
                    "No step implementations found in {0}, thus doesn't make sense to continue".format(
                        basedirs
                    )
                )
            )
        )
        return 4

    failed = 0
    passed = 0
    covered_steps = set()

    for match_config_file in match_config_files:
        # load the given match config file
        with codecs.open(match_config_file, "r", "utf-8") as f:
            match_config = yaml.safe_load(f)

        if not match_config:
            print(
                colorful.magenta(
                    "No sentences found in {0} to test against".format(
                        match_config_file
                    )
                )
            )
            return 5

        print(
            colorful.yellow(
                "Testing sentences from {0}:".format(
                    colorful.bold_yellow(match_config_file)
                )
            )
        )
        failed_sentences, passed_senteces = test_step_matches(match_config, steps)
        failed += failed_sentences
        passed += passed_senteces

        # remember which step implementations were exercised for coverage
        covered_steps = covered_steps.union(
            x["should_match"] for x in match_config if "should_match" in x
        )

        # newline
        sys.stdout.write("\n")

    report = colorful.bold_white("{0} sentences (".format(failed + passed))
    if passed > 0:
        report += colorful.bold_green("{0} passed".format(passed))

    if passed > 0 and failed > 0:
        report += colorful.bold_white(", ")

    if failed > 0:
        report += colorful.bold_red("{0} failed".format(failed))
    report += colorful.bold_white(")")
    print(report)

    step_coverage = 100.0 / len(steps) * len(covered_steps)
    coverage_report = colorful.bold_white(
        "Covered {0} of {1} step implementations".format(len(covered_steps), len(steps))
    )

    ret = 0 if failed == 0 else 1

    if cover_min_percentage:
        coverage_color = (
            colorful.bold_green
            if step_coverage >= float(cover_min_percentage)
            else colorful.bold_red
        )
        coverage_report += colorful.bold_white(" (coverage: ")
        coverage_report += coverage_color("{0:.2f}%".format(step_coverage))
        if float(cover_min_percentage) > step_coverage:
            coverage_report += colorful.bold_white(
                ", expected a minimum of {0}".format(
                    colorful.bold_green(cover_min_percentage + "%")
                )
            )
            if failed == 0:
                ret = 2
            # if tests have passed and coverage is too low we fail with exit code 2
        coverage_report += colorful.bold_white(")")

    print(coverage_report)

    if cover_show_missing:
        missing_steps = get_missing_steps(steps, covered_steps)
        if missing_steps:
            missing_step_report = colorful.bold_yellow("Missing steps:\n")
            for step in missing_steps:
                missing_step_report += "- {0} at ".format(colorful.cyan(step[0]))
                missing_step_report += colorful.cyan(step[1]) + "\n"
            sys.stdout.write(missing_step_report)

    return ret
def generate(yaml_file):
    """Generate roslite/ROS source, cmake and bridge files from *yaml_file*.

    Reads the node/topic/cluster configuration, then renders a set of
    Jinja2 templates into the repository's generated-file locations.
    """
    with open('%s' % (yaml_file)) as fp:
        yaml_data = yaml.safe_load(fp)

    topic_list = get_topics(yaml_data)
    print('')
    print('All topics: {}'.format(colorful.yellow(topic_list)))
    cluster_list = get_clusters(yaml_data)
    print('All activated clusters: {}'.format(colorful.yellow(cluster_list)))
    cluster_cpu_map = get_cpus(yaml_data)
    print('All cluster-cpu mappings: {}'.format(
        colorful.yellow(cluster_cpu_map)))
    print('')

    # cluster 0 is always included in addition to the activated clusters
    subscriber_info_list_map = {
        cluster: get_subscriber_infos(yaml_data, cluster)
        for cluster in itertools.chain([0], cluster_list)
    }
    pub_topic_infos, sub_topic_infos = get_topic_infos(yaml_data)
    # unique, non-empty message packages referenced by any pub/sub topic
    type_package_list = list(
        set([
            topic_info['type_package']
            for topic_info in itertools.chain(pub_topic_infos, sub_topic_infos)
            if len(topic_info['type_package']) > 0
        ]))

    template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 os.pardir, 'config', 'posix', 'template')
    env = Environment(loader=FileSystemLoader(template_path),
                      trim_blocks=True,
                      lstrip_blocks=True)
    env.globals['datetime'] = datetime
    env.globals['input_file'] = os.path.relpath(yaml_file)
    output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              os.pardir, os.pardir)

    filepath = 'roslite/src/generated/init.cpp'
    with open(os.path.join(output_dir, filepath), 'w') as f:
        f.write(
            env.get_template('roslite/init.cpp').render(
                topic_list=topic_list,
                cluster_list=itertools.chain([0], cluster_list),
                subscriber_info_list_map=subscriber_info_list_map))
    commons.print_generated_file(filepath)

    filepath = 'ros_src/generated/init_threads.cpp'
    with open(os.path.join(output_dir, filepath), 'w') as f:
        f.write(
            env.get_template('ros_src/init_threads.cpp').render(
                node_list=yaml_data, cluster_list=cluster_list))
    commons.print_generated_file(filepath)

    filepath = 'ros_src/generated/roslite_node.cmake'
    with open(os.path.join(output_dir, filepath), 'w') as f:
        f.write(
            env.get_template('ros_src/roslite_node.cmake').render(
                node_list=yaml_data, cluster_list=cluster_list))
    commons.print_generated_file(filepath)

    filepath = 'ros_src/generated/roslite_app.cmake'
    with open(os.path.join(output_dir, filepath), 'w') as f:
        f.write(
            env.get_template('ros_src/roslite_app.cmake').render(
                node_list=yaml_data, cluster_list=cluster_list))
    commons.print_generated_file(filepath)

    filepath = 'roslite/generated/roslite.cmake'
    with open(os.path.join(output_dir, filepath), 'w') as f:
        f.write(
            env.get_template('roslite/roslite.cmake').render(
                node_list=yaml_data, cluster_list=cluster_list))
    commons.print_generated_file(filepath)

    filepath = 'roslite/include/ros/generated/main_replacer.h'
    with open(os.path.join(output_dir, filepath), 'w') as f:
        f.write(
            env.get_template('roslite/main_replacer.h').render(
                node_list=yaml_data, cluster_list=cluster_list))
    commons.print_generated_file(filepath)

    filepath = 'roslite/scripts/roslite_cli/generated/rosl_run'
    with open(os.path.join(output_dir, filepath), 'w') as f:
        f.write(
            env.get_template('roslite/rosl_run').render(
                node_list=yaml_data,
                cluster_list=cluster_list,
                cluster_cpu_map=cluster_cpu_map))
    # make the generated CLI script executable, honoring the process umask
    current_umask = os.umask(0o0)
    os.umask(current_umask)
    os.chmod(os.path.join(output_dir, filepath),
             (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) & ~current_umask)
    commons.print_generated_file(filepath)

    filepath = 'ros_bridge/generated/ros_bridge_generated.cmake'
    with open(os.path.join(output_dir, filepath), 'w') as f:
        f.write(
            env.get_template('ros_bridge/ros_bridge_generated.cmake').render(
                type_package_list=type_package_list))
    commons.print_generated_file(filepath)

    filepath = 'ros_bridge/generated/ros_bridge_generated.cpp'
    with open(os.path.join(output_dir, filepath), 'w') as f:
        f.write(
            env.get_template('ros_bridge/ros_bridge_generated.cpp').render(
                pub_topic_infos=pub_topic_infos,
                sub_topic_infos=sub_topic_infos))
    commons.print_generated_file(filepath)
def warning(self, msg, *args, **kwargs):
    """Print *msg* as a yellow warning, formatted via `_format_msg`."""
    colored = cf.yellow(msg)
    self._print(_format_msg(colored, *args, **kwargs))
def warning(self, msg, *args, **kwargs):
    """Print *msg* in yellow as a warning.

    BUG FIX: the original forwarded *args/**kwargs into `cf.yellow`,
    which only styles text; they are meant for `self.print` (as in the
    sibling `warning` implementations in this file).
    """
    self.print(cf.yellow(msg), *args, **kwargs)
def async_delete_file(up, authstr, start_path, f):
    """Issue an async delete request to UpYun for entry *f* under *start_path*.

    Python 2 code (print statements, `except X, e`). Only entries of
    type u'N' (normal files) are deleted; shares the module-level
    requests `session`.
    """
    global session
    if f['type'] != u'N':
        return
    try:
        #url = 'http://%s/%s%s/%s' % (upyun.ED_AUTO, up.bucket, start_path, f['name'])
        url = 'http://%s/%s%s/%s' % (upyun.ED_AUTO, up.service, start_path, f['name'])
    except AttributeError:
        # the upyun client object lacks `.service` — dump and bail out
        traceback.print_exc()
        print 'up:', up, dir(up)
        sys.exit(0)
    headers = {'Authorization': 'Basic %s' % authstr, 'x-upyun-async': 'true'}
    try:
        r = session.delete(url, headers=headers)
    except requests.exceptions.ConnectionError, e:
        # e.args(tuple),
        # e.errno(None),
        # e.filename(None),
        # e.message(requests.packages.urllib3.exceptions.MaxRetryError),
        # e.message.args(tuple),
        # e.message.message(str),
        # e.message.pool(requests.packages.urllib3.connectionpool.HTTPConnectionPool),
        # e.message.reason(requests.packages.urllib3.exceptions.ProtocolError),
        # e.message.url(str)
        # e.request(requests.models.PreparedRequest),
        # e.response(None),
        # e.strerror(None)
        if hasattr(e, 'message') and isinstance(
                e.message, requests.packages.urllib3.exceptions.MaxRetryError
        ) and hasattr(e.message, 'reason'):
            if isinstance(e.message.reason,
                          requests.packages.urllib3.exceptions.ProtocolError):
                handle_protocolerror(e.message.reason)
            elif isinstance(
                    e.message.reason,
                    requests.packages.urllib3.exceptions.NewConnectionError):
                # e.message.reason.args(tuple)
                # e.message.reason.message(str): <...0x7fbf129ec9d0>: Failed to establish a new connection: [Errno -2] Name or service not known
                # e.message.reason.pool(requests.packages.urllib3.connection.HTTPConnection)
                # If a DNS error occurs, keep retrying to resolve the upyun
                # hostname until resolution succeeds.
                if e.message.reason.message.endswith(
                        '[Errno -2] Name or service not known'):
                    while True:
                        try:
                            print colorful.yellow(
                                'DNS error, starting resolving hostname...')
                            socket.gethostbyname(upyun.ED_AUTO)
                            break
                        except socket.gaierror, e:
                            if e.strerror == 'Name or service not known':
                                pass
                            else:
                                print e
                    return
                # unexpected NewConnectionError: dump its attributes
                for attr in ('args', 'message', 'pool'):
                    print 'e.message.reason.%s:' % attr, type(
                        getattr(e.message.reason, attr)), getattr(e.message.reason, attr)
            else:
                logger.warning('e.message.reason: %s' % type(e.message.reason))
        sys.exit(0)
# --- fragment of an integration-test runner loop ---
# NOTE(review): `parameters`, `directory` and `test_dir` are bound by the
# enclosing (unseen) loop.
parameters.append('--wip')
if os.path.isfile('{}/.expected'.format(directory)):
    with open('{}/.expected'.format(directory)) as expected_file:
        # one expected regex per line
        expected = expected_file.read().split('\n')
if not os.path.isfile('{}/.no_early_exit'.format(directory)):
    parameters.append('-q')
parameters.extend([
    '-f', '{}'.format(directory), '-p', '{}/plan.out.json'.format(directory)
])
try:
    print('Running {}.'.format(colorful.yellow(test_dir)))
    # TODO: Add multithreading here if we have more than 50+ integration tests ?
    test_process = subprocess.run(parameters,
                                  check=True,
                                  # shell=True,
                                  stdout=subprocess.PIPE,
                                  universal_newlines=True,
                                  )
    if os.environ.get('DEBUG'):
        print('Output: {}'.format(colorful.grey(test_process.stdout)))
    if test_process.returncode == 0:
        if expected:
            # each expected regex must appear in the tool's stdout
            for each in expected:
                if re.findall(each, str(test_process.stdout)):
def test_step_matches_configs(match_config_files,
                              basedirs,
                              cover_min_percentage=None,
                              cover_show_missing=False):
    """
    Test if the given match config files matches the actual
    matched step implementations.

    Returns an exit code: 0 all passed, 1 failures, 2 coverage too low,
    3 bad coverage argument, 4 no steps found, 5 empty config file.
    """
    # a minimum coverage above 100% can never be reached — reject early
    if cover_min_percentage is not None and float(cover_min_percentage) > 100:
        sys.stderr.write(
            str(
                colorful.magenta(
                    'You are a little cocky to think you can reach a minimum coverage of {0:.2f}%\n'
                    .format(float(cover_min_percentage)))))
        return 3

    # load user's custom python files
    for basedir in basedirs:
        load_modules(basedir)

    steps = StepRegistry().steps

    if not steps:
        sys.stderr.write(
            str(
                colorful.magenta(
                    'No step implementations found in {0}, thus doesn\'t make sense to continue'
                    .format(basedirs))))
        return 4

    failed = 0
    passed = 0
    covered_steps = set()

    for match_config_file in match_config_files:
        # load the given match config file
        with codecs.open(match_config_file, "r", "utf-8") as f:
            match_config = yaml.safe_load(f)

        if not match_config:
            print(
                colorful.magenta(
                    'No sentences found in {0} to test against'.format(
                        match_config_file)))
            return 5

        print(
            colorful.yellow('Testing sentences from {0}:'.format(
                colorful.bold_yellow(match_config_file))))
        failed_sentences, passed_senteces = test_step_matches(
            match_config, steps)
        failed += failed_sentences
        passed += passed_senteces

        # remember which step implementations were exercised for coverage
        covered_steps = covered_steps.union(x['should_match']
                                            for x in match_config
                                            if 'should_match' in x)

        # newline
        sys.stdout.write('\n')

    report = colorful.bold_white('{0} sentences ('.format(failed + passed))
    if passed > 0:
        report += colorful.bold_green('{0} passed'.format(passed))

    if passed > 0 and failed > 0:
        report += colorful.bold_white(', ')

    if failed > 0:
        report += colorful.bold_red('{0} failed'.format(failed))
    report += colorful.bold_white(')')
    print(report)

    step_coverage = 100.0 / len(steps) * len(covered_steps)
    coverage_report = colorful.bold_white(
        'Covered {0} of {1} step implementations'.format(
            len(covered_steps), len(steps)))

    ret = 0 if failed == 0 else 1

    if cover_min_percentage:
        coverage_color = colorful.bold_green if step_coverage >= float(
            cover_min_percentage) else colorful.bold_red
        coverage_report += colorful.bold_white(' (coverage: ')
        coverage_report += coverage_color('{0:.2f}%'.format(step_coverage))
        if float(cover_min_percentage) > step_coverage:
            coverage_report += colorful.bold_white(
                ', expected a minimum of {0}'.format(
                    colorful.bold_green(cover_min_percentage + '%')))
            if failed == 0:
                ret = 2
            # if tests have passed and coverage is too low we fail with exit code 2
        coverage_report += colorful.bold_white(')')

    print(coverage_report)

    if cover_show_missing:
        missing_steps = get_missing_steps(steps, covered_steps)
        if missing_steps:
            missing_step_report = colorful.bold_yellow('Missing steps:\n')
            for step in missing_steps:
                missing_step_report += '- {0} at '.format(
                    colorful.cyan(step[0]))
                missing_step_report += colorful.cyan(step[1]) + '\n'
            sys.stdout.write(missing_step_report)

    return ret
# --- fragment of an integration-test runner loop ---
# NOTE(review): `parameters`, `directory`, `test_dir` and the surrounding
# if/else belong to the enclosing (unseen) loop.
    test_result = colorful.orange('skipped')
else:
    if os.path.isfile('{}/.failure'.format(directory)):
        parameters.append('--wip')
    if os.path.isfile('{}/.expected'.format(directory)):
        with open('{}/.expected'.format(directory)) as expected_file:
            # single regex covering the whole expected output
            expected = expected_file.read()
    parameters.extend([
        '-f', '{}'.format(directory), '-p',
        '{}/plan.out.json'.format(directory)
    ])
    try:
        print('Running {}.'.format(colorful.yellow(test_dir)))
        # TODO: Add multithreading here if we have more than 50+ integration tests ?
        test_process = subprocess.run(
            parameters,
            check=True,
            # shell=True,
            stdout=subprocess.PIPE,
            universal_newlines=True,
        )
        if os.environ.get('DEBUG'):
            print('Output: {}'.format(colorful.grey(test_process.stdout)))
        if test_process.returncode == 0:
            if expected:
                if re.findall(expected, str(test_process.stdout)):
def run_tests(tests):
    """Run every terraform-compliance integration test directory in *tests*.

    Each directory may carry marker files: `.failure` (expected to fail,
    adds --wip), `.expected` / `.unexpected` (regexes that must / must not
    appear in the output), `.no_early_exit`. Returns a tuple
    (failure_happened, test_summary) where test_summary is a list of
    formatted per-test result lines.
    """
    test_summary = []
    failure_happened = False
    for test_dir in tests:
        parameters = ['terraform-compliance', '--no-ansi']

        # Ignore if there are any .terraform folders in this level. They can build up when writing tests.
        if '.terraform' in test_dir:
            continue

        feature_directory = '{}/../..'.format(test_dir)
        test_result = ''
        expected = ''
        unexpected = ''
        # BUG FIX: track failure per test; the original reused the run-global
        # flag, so after one failing test every later passing test reported
        # a blank result instead of 'passed'.
        test_failed = False

        if not os.path.isfile('{}/plan.out.json'.format(test_dir)):
            test_result = colorful.orange('skipped')
        else:
            if os.path.isfile('{}/.failure'.format(test_dir)):
                parameters.append('--wip')

            if os.path.isfile('{}/.expected'.format(test_dir)):
                with open('{}/.expected'.format(test_dir)) as expected_file:
                    expected = expected_file.read().split('\n')

            if os.path.isfile('{}/.unexpected'.format(test_dir)):
                with open(
                        '{}/.unexpected'.format(test_dir)) as unexpected_file:
                    unexpected = unexpected_file.read().split('\n')

            if not os.path.isfile('{}/.no_early_exit'.format(test_dir)):
                parameters.append('-q')

            parameters.extend([
                '-f', '{}'.format(feature_directory), '-p',
                '{}/plan.out.json'.format(test_dir)
            ])

            try:
                print('Running {}.'.format(colorful.yellow(test_dir)))
                # TODO: Add multithreading here if we have more than 50+ integration tests ?
                test_process = subprocess.run(
                    parameters,
                    check=True,
                    # shell=True,
                    stdout=subprocess.PIPE,
                    universal_newlines=True,
                )

                if os.environ.get('DEBUG'):
                    print('Output: {}'.format(
                        colorful.grey(test_process.stdout)))

                if test_process.returncode == 0:
                    if expected:
                        # every expected regex must appear in the output
                        expected_failures = [
                            exp for exp in expected
                            if not re.findall(exp, str(test_process.stdout))
                        ]
                        if expected_failures:
                            print('\nOutput: {}'.format(test_process.stdout))
                            print('Can not find ;')
                            for failure in expected_failures:
                                print('\t{}'.format(colorful.yellow(failure)))
                            print('in the test output.\n')
                            test_result = colorful.red('failed')
                            test_failed = True

                    if unexpected:
                        # no unexpected regex may appear in the output
                        unexpected_failures = [
                            unexp for unexp in unexpected
                            if re.findall(unexp, str(test_process.stdout))
                        ]
                        if unexpected_failures:
                            print('\nOutput: {}'.format(test_process.stdout))
                            print('Found;')
                            # BUG FIX: the original iterated the
                            # `expected_failures` list here, printing the
                            # wrong regexes and raising NameError when no
                            # `.expected` file existed.
                            for failure in unexpected_failures:
                                print('\t{}'.format(colorful.yellow(failure)))
                            print('in the test output. This was unexpected.\n')
                            test_result = colorful.red('failed')
                            test_failed = True

                    if not test_failed:
                        test_result = colorful.green('passed')
                else:
                    print('Output: {}'.format(test_process.stdout))
                    test_result = colorful.red('failed')
                    test_failed = True
            except subprocess.CalledProcessError as e:
                test_failed = True
                if e.returncode != 1:
                    test_result = colorful.orange('errored')
                else:
                    test_result = colorful.red('failed')
                print('Expected a different return code. Received {}'.format(
                    colorful.yellow(e.returncode)))
                print('Output: {}'.format(e.stdout))

            if test_failed:
                failure_happened = True

        test_summary.append('{:.<70s}{:.>10s}'.format(test_dir, test_result))

    return failure_happened, test_summary
def report_globals():
    """Report the active '--encode' CLI setting (or that none is set)."""
    encoding = args['--encode']
    if encoding:
        enc_report = 'encoding={}'.format(encoding)
    else:
        enc_report = 'encoding not set'
    report(colorful.yellow(enc_report))