def list_jobs(filter_prj=None, filter_cmd=None, by_id=None):
    """Print a table of jobs from rms_db, optionally filtered.

    :param filter_prj: substring matched against each job's "<project_code>".
    :param filter_cmd: exact "<command>" value to search for.
    :param by_id: db doc_id of a single job to show.
    """
    db_results = []
    if by_id:
        # bugfix: the original fetched the same doc_id twice; reuse the
        # already-retrieved job instead.
        job = rms_db.get(doc_id=by_id)
        if job:
            db_results.append(job)
        else:
            print(colorful.bold_red(f" no job with id {by_id} found"))
    else:
        db_results = rms_db.all()
    if filter_cmd:
        db_results = rms_db.search(Query()["<command>"] == filter_cmd)
    if filter_prj:
        db_results = [r for r in db_results if filter_prj in r["<project_code>"]]
    if db_results:
        # table header
        print(colorful.cyan("id".rjust(6)),
              colorful.cyan("project_code".ljust(18)),
              colorful.cyan("command".ljust(14)),
              colorful.cyan("start_time".rjust(12)),
              colorful.cyan("timeout".rjust(8)),
              )
        for job in db_results:
            job_id = job.doc_id
            # jobs stored without a timeout default to 60
            job_timeout = job.get("timeout")
            if not job_timeout:
                job_timeout = 60
            print(str(job_id).rjust(6),
                  job["<project_code>"].ljust(18),
                  job["<command>"].ljust(14),
                  job[">start_time"].rjust(12),
                  str(job_timeout).rjust(8),
                  )
    else:
        print(colorful.bold_red(" no matching entries found."))
def apply_statements(statements: str, db: str):
    """
    Apply a file of SQL statements to a database.

    :param statements: An SQL file of statements to apply to the database.
    :param db: A database to target.
    """
    # Validate the statements file first; bail out with an OS-level exit code.
    if not os.path.exists(statements):
        print(
            cf.bold_red("Error:"),
            f'Could not find file "{statements}"',
            file=sys.stderr,
        )
        sys.exit(os.EX_OSFILE)
    base_uri = copy_url(db)
    if not database_exists(base_uri, test_can_select=True):
        print(
            cf.bold_red("Error:"),
            f'Database "{base_uri.database}" does not exist.',
        )
        sys.exit(os.EX_NOHOST)
    # Load and execute the statements inside a session.
    with S(db) as session:
        try:
            load_sql_from_file(session, statements)
        except Exception as exc:
            print(cf.bold_red("Error:"), exc, file=sys.stderr)
            sys.exit(os.EX_DATAERR)
    print(cf.bold("All done! ✨"))
def dot_formatter_failure_summary(self, features, marker):
    """Output summary for failed Scenarios."""
    if not self._failed_steps:
        return
    # Collect the pieces and join once at the end instead of repeated +=.
    parts = ["\n" + cf.bold_red("Failures:") + "\n"]
    for failed_step in self._failed_steps:
        parts.append("{}: {}\n {}\n".format(
            failed_step.path, failed_step.parent.sentence,
            cf.red(failed_step.sentence)))
        if world.config.with_traceback:
            traceback_lines = failed_step.failure.traceback.split("\n")[:-2]
            parts.append(" {}\n".format(
                "\n ".join(str(cf.red(line)) for line in traceback_lines)))
        parts.append(" {}: {}\n\n".format(
            cf.bold_red(failed_step.failure.name),
            cf.red(failed_step.failure.reason)))
    sys.stdout.write(u("".join(str(p) for p in parts) + "\n"))
def init(db: str = None, schema: str = "schema.sql", overwrite: bool = False):
    """
    Create an initial schema SQL file, optionally from an existing database.

    :param db: An optional database to create the schema from.
    :param schema: An optional file to write schema to. Default: schema.sql
    :param overwrite: Overwrite existing file.
    """
    if os.path.exists(schema) and not overwrite:
        print(cf.bold_red("Error:"), f'File "{schema}" already exists.', file=sys.stderr)
        print("Run again with", cf.bold("--overwrite"), "to replace.")
        sys.exit(os.EX_OSFILE)
    if not db:
        # No source database given: emit an empty schema file and stop.
        with open(schema, "w") as f:
            f.write("")
        print(cf.bold("All done! ✨"))
        print(f'Created blank file "{schema}"')
        sys.exit()
    base_uri = copy_url(db)
    if not database_exists(base_uri, test_can_select=True):
        print(
            cf.bold_red("Error:"),
            f'Database "{base_uri.database}" does not exist.',
        )
        sys.exit(os.EX_NOHOST)
    patch = create_admin_patch(base_uri)
    patch.start()
    ddl = ""
    with temporary_database(base_uri) as s_temp, S(db) as s_from:
        # Diff an empty temporary database against the source db to produce
        # the full schema DDL.
        migration = Migration(s_temp, s_from)
        migration.add_all_changes()
        migration.set_safety(False)
        ddl = migration.sql
    with open(schema, "wb") as f:
        f.write(pg_format(ddl.encode(), unquote=False))
    print(cf.bold("All done! ✨"))
    print(f'Created file "{schema}" with schema from "{base_uri.database}"')
    sys.exit()
def _emu_init(self):
    """Create emulation object. Map/initialize memory, add needed hooks.

    :return: True when the emulator was initialized, False on UcError.
    """
    try:
        self.emu = Uc(UC_ARCH_ARM, UC_MODE_ARM)
        self._emu_init_memory(self.bin_type)
        self._emu_init_hooks()
    except UcError as err:
        # bugfix: original used the Python-2 `print expr` statement form,
        # which is a syntax error under Python 3; print() works on both.
        print(colorful.bold_red(
            "Error initializing emulator: {}".format(err)))
        return False
    self.reg_write(UC_ARM_REG_PC, self.start_addr)
    self.is_init = True
    return True
def check_extensions():
    """Verify the external helper programs are on PATH; return error messages."""
    newline()
    subheader("Verifying extension programs...")
    programs = (
        (
            "jpegtran",
            "Thumbor uses jpegtran for optimizing JPEG images. "
            "For more information visit https://linux.die.net/man/1/jpegtran.",
        ),
        (
            "ffmpeg",
            "Thumbor uses ffmpeg for rendering animated images as GIFV. "
            "For more information visit https://www.ffmpeg.org/.",
        ),
        (
            "gifsicle",
            "Thumbor uses gifsicle for better processing of GIF images. "
            "For more information visit https://www.lcdf.org/gifsicle/.",
        ),
    )
    errors = []
    for program, error_message in programs:
        if which(program) is None:
            # program missing: report and remember the message
            print(cf.bold_red("❎ %s is not installed." % program))
            print(error_message)
            newline()
            errors.append(error_message)
        else:
            print(cf.bold_green("✅ %s is installed correctly." % program))
    return errors
# Builds a per-line colored failure message from self.message, then either
# prints it and fails/skips the step (when exit_on_failure is False) or raises
# self.exception with the joined message. Kept verbatim: the source arrived
# whitespace-collapsed and the exact if/else nesting cannot be reconstructed
# with certainty.
# NOTE(review): first message line is prefixed with the exception name; later
# lines are padded with spaces to align under it — confirm against a rendered
# console run before restructuring.
def _process(self): # Prepare message msg = [] for msg_index in range(0,len(self.message)): if self.exit_on_failure is False: msg_header = '{}{}'.format(self.exception_name, colorful.bold_white(':')) if msg_index == 0 else ' '*(len(self.exception_name)+1) msg.append('\t\t{} {}'.format(colorful.bold_red(msg_header), colorful.red(self.message[msg_index]))) else: msg.append(self.message[msg_index] if msg_index == 0 else '{}{} {} {}'.format("\t"*2, ' '*(len(self.exception_name)+1), colorful.bold_white(':'), self.message[msg_index])) if self.exit_on_failure is False: for message in msg: console_write(message) if self.no_failure is False: self._fail_step(self.step_obj.id) else: self.step_obj.state = Step.State.SKIPPED return if self.no_failure is False: raise self.exception('\n'.join(msg))
def check_modules():
    """Try importing each optional library thumbor uses; return error strings."""
    newline()
    subheader("Verifying libraries support...")
    modules = (
        (
            "pycurl",
            "Thumbor works much better with PyCurl. For more information visit http://pycurl.io/.",
        ),
        (
            "cv2",
            "Thumbor requires OpenCV for smart cropping. For more information check https://opencv.org/.",
        ),
        (
            "pyexiv2",
            "Thumbor uses exiv2 for reading image metadata. For more information check https://python3-exiv2.readthedocs.io/en/latest/.",
        ),
        (
            "cairosvg",
            "Thumbor uses CairoSVG for reading SVG files. For more information check https://cairosvg.org/.",
        ),
    )
    errors = []
    for module, error_message in modules:
        try:
            import_module(module)  # NOQA
        except ImportError as error:
            print(cf.bold_red("❎ %s is not installed." % module))
            print(error_message)
            newline()
            errors.append("%s - %s" % (str(error), error_message))
        else:
            print(cf.bold_green("✅ %s is installed correctly." % module))
    return errors
# Formatter-aware variant of _process: routes the colored failure message to
# the console (gherkin) or onto step_obj.context.failure_msg (silent_formatter),
# then fails or skips the step; otherwise raises self.exception.
# NOTE(review): `str(world.config.formatter) in ('gherkin')` is a SUBSTRING
# test against the string 'gherkin' — the parentheses do not make a tuple, so
# e.g. 'gher' would also match. Likely intended `in ('gherkin',)`; same for
# ('silent_formatter'). Confirm intent before fixing.
# Kept verbatim: the collapsed source makes the else-attachment in the message
# loop ambiguous, so a reformatting rewrite is not safe here.
def _process(self): # Prepare message msg = [] for msg_index in range(0, len(self.message)): if self.exit_on_failure is False or self.no_failure is True: msg_header = '{}{}'.format( self.exception_name, colorful.bold_white(':') ) if msg_index == 0 else ' ' * (len(self.exception_name) + 1) if str(world.config.formatter) in ('gherkin'): # this line could be improved by letting radish handle the printing msg.append('\t\t{} {}'.format( colorful.bold_red(msg_header), colorful.red(self.message[msg_index]))) elif str(world.config.formatter) in ('silent_formatter'): msg.append('{} '.format(colorful.bold_red(msg_header))) msg.append('{}'.format( colorful.red(self.message[msg_index]))) else: msg.append( self.message[msg_index] if msg_index == 0 else '{}{} {} {}' .format("\t" * 2, ' ' * (len(self.exception_name) + 1), colorful.bold_white(':'), self.message[msg_index])) if self.exit_on_failure is False or (self.no_failure is True and msg): if str(world.config.formatter) in ('gherkin'): for message in msg: console_write(message) elif str(world.config.formatter) in ('silent_formatter'): if not hasattr(self.step_obj.context, 'failure_msg'): # where to put this self.step_obj.context.failure_msg = [] self.step_obj.context.failure_msg.extend(msg) if self.no_failure is False: self._fail_step(self.step_obj.id) else: self.step_obj.state = Step.State.SKIPPED for step in self.step_obj.parent.all_steps: step.runable = False return if self.no_failure is False: raise self.exception('\n'.join(msg))
def check_model_path(job_id):
    """Check whether the model file recorded for a job exists on disk.

    :param job_id: db doc_id of the job (int or int-convertible).
    :return: True when the model path exists, False otherwise.
    """
    job = rms_db.get(doc_id=int(job_id))
    model_path = pathlib.Path(job["<full_model_path>"])
    if model_path.exists():
        print(colorful.bold_green(f" model found at: {model_path}"))
        return True
    print(colorful.bold_red(f" could not find model at: {model_path}"))
    # bugfix: the original fell off the end and returned None on a miss;
    # return an explicit False so callers can test the result reliably.
    return False
def __str__(self):
    """Render the document title/version, description and resource list."""
    doc = self.document
    text = ""
    text += colorful.bold_red("{} {}\n".format(doc.get("title"), doc.get("version")))
    text += doc.get("description") + "\n\n"
    text += header("Resources: ")
    # one green line per resource
    for res in self.get_resources():
        text += colorful.green(res) + "\n"
    return text
def diff_file(
    schema: str,
    db: str,
    unsafe: bool = False,
    apply: bool = False,
):
    """
    Diff a file of SQL statements against a database.

    :param schema: The SQL schema to match against.
    :param db: The database to target.
    :param unsafe: Generate unsafe statements.
    :param apply: Apply the statements to bring the target database up to date.
    """
    if not os.path.exists(schema):
        print(cf.bold_red("Error:"), f'Could not find file "{schema}"', file=sys.stderr)
        sys.exit(os.EX_OSFILE)
    sql_statements = sql_from_file(schema)
    # Run the diff, translating the two known failure modes into exit codes.
    try:
        statements, generated_unsafe = diff(sql_statements, db, unsafe, apply, True)
    except DatabaseDoesNotExist as err:
        print(
            cf.bold_red("Error:"),
            f'Database "{err.database}" does not exist.',
            file=sys.stderr,
        )
        sys.exit(os.EX_NOHOST)
    except SQLSyntaxError as err:
        print(cf.bold_red("Error:"), err.error, file=sys.stderr)
        sys.exit(os.EX_DATAERR)
    if generated_unsafe:
        # Refuse to proceed: the caller must opt in to unsafe statements.
        print(
            cf.bold_yellow("Careful:"),
            "Unsafe statements generated.",
            file=sys.stderr,
        )
        print("Run again with", cf.bold("--unsafe"))
        sys.exit(os.EX_USAGE)
def format(self, record):
    """Colorize record.levelname, then delegate to logging.Formatter.format."""
    level_styles = (
        (('NOTSET', 'DEBUG'), colorful.cyan),
        (('INFO',), colorful.green),
        (('WARNING',), colorful.bold_yellow),
        (('ERROR', 'CRITICAL'), colorful.bold_red),
    )
    for names, style in level_styles:
        if record.levelname in names:
            record.levelname = style(record.levelname)
            break
    return logging.Formatter.format(self, record)
def write_stdout(level, message):
    """Write an INFO or WARNING message to the console with radish framing."""
    if level == 'WARNING':
        prefix = colorful.bold_red(u'\u2757 WARNING :')
        message = colorful.yellow(message)
    else:
        prefix = colorful.bold_yellow(u'\u229b INFO :')
    # continuation lines get a vertical bar plus padding under the prefix
    continuation = u'\n\t\t{}\t{} '.format(colorful.gray(u'\u2502'), ' ' * len(prefix))
    lines = message.split('\n')
    console_write(u'\t\t\u251c\u2501\t{} {}'.format(prefix, continuation.join(lines)))
def output_failure(step_func, errors):
    """
    Write the given errors to stdout.
    """
    out = sys.stdout
    out.write(u(colorful.bold_red("✘")))
    if step_func is not None:
        location = get_func_location(step_func)
        out.write(u(colorful.red(" (at {0})".format(location))))
    out.write("\n")
    for err in errors:
        print(u(colorful.red(" - {0}".format(err))))
def output_failure(step_func, errors):
    """
    Write the given errors to stdout.
    """
    # red cross marker, optionally followed by the failing step's location
    sys.stdout.write(u(colorful.bold_red(u'✘')))
    if step_func is not None:
        sys.stdout.write(u(colorful.red(
            ' (at {0})'.format(get_func_location(step_func)))))
    sys.stdout.write('\n')
    for single_error in errors:
        print(u(colorful.red(' - {0}'.format(single_error))))
def output_failure(step_func, errors):
    """
    Write the given errors to stdout.
    """
    marker = u(colorful.bold_red("✘"))
    sys.stdout.write(marker)
    if step_func is not None:
        where = " (at {0})".format(get_func_location(step_func))
        sys.stdout.write(u(colorful.red(where)))
    sys.stdout.write("\n")
    for error in errors:
        line = " - {0}".format(error)
        print(u(colorful.red(line)))
def check_filters():
    """Import every builtin thumbor filter; return the ImportErrors collected."""
    newline()
    subheader("Verifying thumbor filters...")
    errors = []
    for filter_name in BUILTIN_FILTERS:
        try:
            import_module(filter_name)
        except ImportError as error:
            print(cf.bold_red("❎ %s" % filter_name))
            errors.append(error)
        else:
            print(cf.bold_green("✅ %s" % filter_name))
    return errors
# Scans commands_dir for a directory matching search_command, loads that
# directory's __init__.py via SourceFileLoader, and pulls "rjm" and
# "post_process" entries out of the module's `register` dict; post_process
# args are resolved by name from globals(). Exits via exit_with_log when the
# directory or its __init__.py is missing.
# NOTE(review): kept verbatim — the source arrived whitespace-collapsed and
# the nesting of the `register` handling cannot be reconstructed safely.
def get_jrn_and_post_process(search_command, commands_dir): """ Searches command paths for register dict in __init__.py in command roots to prepare appropriate command strings to be inserted into the journal file :param search_command: command name to look up :param commands_dir: commands directory :return: command module, post process dict """ found_dir = False module_rjm = None post_proc_dict = defaultdict() for directory in os.scandir(commands_dir): command_name = directory.name # print(command_name) if search_command == command_name: found_dir = True print( f" found appropriate command directory {op.join(commands_dir, command_name)}" ) if op.exists(f"{commands_dir}/{command_name}/__init__.py"): mod = machinery.SourceFileLoader( command_name, op.join(commands_dir, command_name, "__init__.py")).load_module() else: exit_with_log('__init__.py in command directory not found') if "register" in dir(mod): if mod.register["name"] == command_name: # print("command_name found!") if "rjm" in mod.register: module_rjm = mod.register["rjm"] if "post_process" in mod.register: external_args = [] for arg in mod.register["post_process"]["args"]: external_args.append(globals().get(arg)) post_proc_dict["func"] = mod.register["post_process"][ "func"] post_proc_dict["args"] = external_args if not found_dir: print( colorful.bold_red( f" appropriate command directory for '{search_command}' not found - aborting." )) exit_with_log('command directory not found') return module_rjm, post_proc_dict
def check_compiled_extensions():
    """Import each compiled thumbor extension; return the ImportErrors."""
    newline()
    subheader("Verifying thumbor compiled extensions...")
    errors = []
    for extension in BUILTIN_EXTENSIONS:
        # report using the short name, without the package prefix
        ext_name = extension.replace("thumbor.ext.filters.", "")
        try:
            import_module(extension)
        except ImportError as error:
            print(cf.bold_red("❎ %s" % ext_name))
            errors.append(error)
        else:
            print(cf.bold_green("✅ %s" % ext_name))
    return errors
def __str__(self):
    """Summarize all substreams; the most probable one is rendered in red."""
    probs = self.substreams.get_probs()
    deltas = self.substreams.get_deltas()
    # sorted()[-1] keeps the original tie-breaking (last of the max group)
    max_key, max_prob = sorted(probs.items(), key=(lambda x: x[1]))[-1]
    summary = colorful.bold_cyan(
        "total substream: {}".format(len(self.substreams))).styled_string
    for key, examples in self.substreams.items():
        line = "\nsubstream {}:\t examples {}\t delta: {:.2f}\t out-prob: {:.2f}".format(
            key, len(examples), deltas[key], probs[key])
        style = colorful.bold_red if key == max_key else colorful.bold_cyan
        summary += style(line).styled_string
    return summary
# Walks rms_paths.xml_imp for *.xml files, detects rms task xmls by the
# presence of "process_model.py", extracts command/project/model-path tokens
# and the start time with regexes, serializes them via serdes() and upserts
# into rms_db keyed on (<project_code>, <command>).
# NOTE(review): assumes exported task xmls are UTF-16LE (encoding="utf-16le")
# and that the Arguments element always contains the expected token order —
# confirm against a real task export. Kept verbatim: collapsed source makes
# the loop/with nesting too ambiguous to restyle safely.
def import_xmls_into_db(): """ import all xml rms tasks from db/xml_import directory into db """ found_rms_task_xml = False for entry in os.scandir(rms_paths.xml_imp): if not entry.is_file(): continue if not entry.name.endswith(".xml"): continue with open(rms_paths.xml_imp / entry.name, "r", encoding="utf-16le") as xml_task: xml_content = xml_task.read() re_process_model = re.compile("process_model.py") is_rms_task = re.findall(re_process_model, xml_content) if is_rms_task: print(colorful.bold_green(f" processing xml: {entry.name}")) found_rms_task_xml = True cmd_tokens = {"args": {}, "opts": []} re_args = re.compile("Arguments>(.+)</Arguments") re_opts = re.compile("(--.+)", ) re_start = re.compile("StartBoundary>.+T(.+)</StartBoundary") arguments = re.findall(re_args, xml_content) options = re.findall(re_opts, arguments[0])[0] cmd_args = arguments[0].split("--")[0].split() cmd_tokens["args"]["<command>"] = cmd_args[1] cmd_tokens["args"]['<project_code>'] = cmd_args[2] cmd_tokens["args"]["<full_model_path>"] = cmd_args[3] cmd_tokens["args"][">start_time"] = re.findall( re_start, xml_content)[0] cmd_tokens["opts"] = [ "--" + tok.strip() for tok in options.split("--") if tok ] # print(f" found {cmd_tokens}") db_job_dict = serdes( cmd_tokens=cmd_tokens) # {"args": args, "opts": options}) pprint(db_job_dict) rms_db.upsert( db_job_dict, (Query()["<project_code>"] == cmd_tokens["args"]['<project_code>']) & (Query()["<command>"] == cmd_tokens["args"]["<command>"])) print(" added/updated in db.") if not found_rms_task_xml: print( colorful.bold_red( f" could not find rms task xml in: {rms_paths.db}"))
def test_problem(problem, log=True):
    """Run every .in test for *problem* against its solution program.

    :param problem: problem directory name (relative to the cwd).
    :param log: when True, print progress and diffs of failing tests.
    :return: True when all tests matched their .ans files, False otherwise.
    """
    if log:
        print(f"👷 Testing {problem}...")
    lang = language_detector.determine_language(problem)
    lang_config = language_detector.get_config(lang)
    if log:
        print(f"👷 Language = {lang_config.kattis_name} {lang_config.emoji}\n")
    inputs = glob.glob(f"./{problem}/*.in")
    count = 0
    failed = False
    for input in inputs:
        count += 1
        if log:
            print(f"🔎 Test number {count}:")
        # bugfix: the original open()ed the .in/.ans files and never closed
        # them; context managers guarantee the handles are released.
        with open(input, "rb") as input_file:
            input_content = input_file.read()
        program_path = f"./{problem}/solution.{lang_config.file_extension}"
        output_string = lang_config.run_program(program_path, input_content)
        answer = input.replace('.in', '.ans')
        with open(answer, "r") as answer_file:
            answer_content = answer_file.read()
        # normalize line endings so Windows-produced output still compares equal
        if output_string.replace("\r\n", "\n") != answer_content.replace(
                "\r\n", "\n"):
            if log:
                print(cf.bold_red("❌ Failed..."))
                print("__________INPUT____________")
                print(input_content.decode('utf-8'))
                print("__________INPUT____________")
                print(cf.red("__________OUTPUT___________"))
                print(cf.red(output_string))
                print(cf.red("__________OUTPUT___________"))
                print("__________EXPECTED_________")
                print(answer_content)
                print("__________EXPECTED_________")
            failed = True
        elif log:
            print(cf.bold_green("✅ Test succesful!\n"))
    return not failed
def main():
    """Converts a given url with the specified arguments."""
    options = get_options()
    cf.use_style("solarized")
    if options["nocolor"]:
        cf.disable()
    newline()
    header("Thumbor v%s (of %s)" % (__version__, __release_date__))
    newline()
    print(
        "Thumbor doctor will analyze your install and verify if everything is working as expected."
    )
    # run all checks and pool their error messages
    errors = check_modules()
    errors += check_compiled_extensions()
    errors += check_filters()
    errors += check_extensions()
    newline()
    if not errors:
        print(cf.bold_green("🎉 Congratulations! No errors found! 🎉"))
        return
    print(
        cf.bold_red(
            "😞 Oh no! We found some things that could improve... 😞"))
    newline()
    print("\n".join(["* %s" % str(err) for err in errors]))
    newline()
    newline()
    print(
        cf.cyan(
            "If you don't know how to fix them, please open an issue with thumbor."
        ))
    print(
        cf.cyan(
            "Don't forget to copy this log and add it to the description of your issue."
        ))
    print("Open an issue at https://github.com/thumbor/thumbor/issues/new")
    sys.exit(1)
def log(msg: str, tp: int = 1, name: str = "NNG One", force: bool = False):
    """Print a colored log line; error/debug/hell gated by the saved debug level."""
    self = Logger
    try:
        debug = NNGRawSaves().debug
    except Exception:
        # saves unavailable: fall back to the most restrictive level
        debug = self.level.none
    levels = self.level
    cfg = self.config_logging_level
    if tp == levels.success:
        print(cf.bold_green(f"[{name}] {msg}"))
    elif tp == levels.warn:
        print(cf.bold_blue(f"[{name}] {msg}"))
    elif tp == levels.tagged_success:
        print(cf.bold_green(f"[{name}] {Globals.tag} | {msg}"))
    elif (tp == levels.error and debug >= cfg.log_errors) or force:
        print(cf.bold_red(f"[{name}] [ERROR] {msg}"))
    elif (tp == levels.debug and debug >= cfg.log_debug) or force:
        print(cf.bold_purple(f"[{name}] [DEBUG] {msg}"))
    elif tp == levels.hell and debug >= cfg.log_hell:
        print(cf.bold_grey(f"{name} [HELL] {msg}"))
def test(self):
    """Evaluate on the test set; checkpoint the model when the loss improves."""
    self.model.eval()
    loader = self.dataset.test_loader
    total_loss = 0
    with torch.no_grad():
        for batch, _ in loader:
            batch = batch.cuda()
            batch = self.dataset.preprocess(batch)
            # weight each batch loss by its size for a dataset-level mean
            total_loss += self.model(batch).item() * len(batch)
    total_loss /= len(loader.dataset)
    if self.best_test_loss is None or total_loss < self.best_test_loss:
        self.best_test_loss = total_loss
        self.best_epoch = self.epoch
        self.save_model(self.epoch)
    print(
        colorful.bold_red(
            '====> Test set loss: {:.4f}'.format(total_loss)).styled_string)
    self.writer.add_scalar('test/loss', total_loss, self.epoch)
# Scans a Revit journal file line by line (decoded latin1, errors ignored) for
# the configured key_phrases; matches are reported and collected into the
# `detected` dict keyed by the phrase's mapped name.
# NOTE(review): the collapsed source places a `return detected` immediately
# after the first match is recorded, which would stop scanning after one hit —
# confirm whether early return or a full scan is intended before reformatting.
def read_journal(journal_path): """ reads journal file to detect key phrases :param journal_path: journal file path :return:dict """ detected = {} with open(journal_path, 'rb') as journal: journal_name = op.basename(journal_path) for line in journal: decoded_line = line.decode("latin1", "ignore") for key_phrase in key_phrases: if key_phrase in decoded_line: print( colorful.bold_red( f"-!!_found:{key_phrases[key_phrase]}_!!")) print(" journal path: {}".format(journal_path)) # print(decoded_line) detected[ key_phrases[key_phrase]] = journal_name + decoded_line return detected if not detected: detected["nothing detected in"] = journal_name return detected
# Validates YAML "match config" sentence files against the registered step
# implementations: returns 3 on an impossible coverage target, 4 when no steps
# are registered, 5 when a config file has no sentences, 1 when any sentence
# failed, 2 when all passed but coverage is below cover_min_percentage, else 0.
# Prints a colored pass/fail report and, with cover_show_missing, the steps no
# sentence covered.
# NOTE(review): `cover_min_percentage + '%'` implies the parameter is a string
# (it is also passed through float() above) — verify the CLI always hands a
# string in. Kept verbatim: collapsed source, reporting logic too intricate to
# restyle with confidence.
def test_step_matches_configs(match_config_files, basedirs, cover_min_percentage=None, cover_show_missing=False): """ Test if the given match config files matches the actual matched step implementations. """ if cover_min_percentage is not None and float(cover_min_percentage) > 100: sys.stderr.write( str( colorful.magenta( 'You are a little cocky to think you can reach a minimum coverage of {0:.2f}%\n' .format(float(cover_min_percentage))))) return 3 # load user's custom python files for basedir in basedirs: load_modules(basedir) steps = StepRegistry().steps if not steps: sys.stderr.write( str( colorful.magenta( 'No step implementations found in {0}, thus doesn\'t make sense to continue' .format(basedirs)))) return 4 failed = 0 passed = 0 covered_steps = set() for match_config_file in match_config_files: # load the given match config file with codecs.open(match_config_file, "r", "utf-8") as f: match_config = yaml.safe_load(f) if not match_config: print( colorful.magenta( 'No sentences found in {0} to test against'.format( match_config_file))) return 5 print( colorful.yellow('Testing sentences from {0}:'.format( colorful.bold_yellow(match_config_file)))) failed_sentences, passed_senteces = test_step_matches( match_config, steps) failed += failed_sentences passed += passed_senteces covered_steps = covered_steps.union(x['should_match'] for x in match_config if 'should_match' in x) # newline sys.stdout.write('\n') report = colorful.bold_white('{0} sentences ('.format(failed + passed)) if passed > 0: report += colorful.bold_green('{0} passed'.format(passed)) if passed > 0 and failed > 0: report += colorful.bold_white(', ') if failed > 0: report += colorful.bold_red('{0} failed'.format(failed)) report += colorful.bold_white(')') print(report) step_coverage = 100.0 / len(steps) * len(covered_steps) coverage_report = colorful.bold_white( 'Covered {0} of {1} step implementations'.format( len(covered_steps), len(steps))) ret = 0 if failed == 0 else 1 if cover_min_percentage:
coverage_color = colorful.bold_green if step_coverage >= float( cover_min_percentage) else colorful.bold_red coverage_report += colorful.bold_white(' (coverage: ') coverage_report += coverage_color('{0:.2f}%'.format(step_coverage)) if float(cover_min_percentage) > step_coverage: coverage_report += colorful.bold_white( ', expected a minimum of {0}'.format( colorful.bold_green(cover_min_percentage + '%'))) if failed == 0: ret = 2 # if tests have passed and coverage is too low we fail with exit code 2 coverage_report += colorful.bold_white(')') print(coverage_report) if cover_show_missing: missing_steps = get_missing_steps(steps, covered_steps) if missing_steps: missing_step_report = colorful.bold_yellow('Missing steps:\n') for step in missing_steps: missing_step_report += '- {0} at '.format( colorful.cyan(step[0])) missing_step_report += colorful.cyan(step[1]) + '\n' sys.stdout.write(missing_step_report) return ret
rvt_journal_writer.rps_addin_template, rvt_model_version, ) run_proc = rvt_journal_run(rvt_install_path, journal_file_path, paths["root_dir"]) run_proc_id = run_proc.pid run_proc_name = run_proc.name() # let's wait a second for rvt process to fire up time.sleep(0.5) if run_proc.name() == "Revit.exe": proc_name_colored = colorful.bold_green(run_proc_name) else: proc_name_colored = colorful.bold_red(run_proc_name) print(f" process info: {run_proc_id} - {proc_name_colored}") print(colorful.bold_orange(f"-detected revit: {rvt_model_version}")) print(f" version:{rvt_model_version} at path: {rvt_install_path}") print(colorful.bold_orange("-process countdown:")) print( f" timeout until termination of process: {run_proc_id} - {proc_name_colored}:" ) log_journal = get_rvt_proc_journal(run_proc, paths["journals_dir"]) return_code = None return_logging = logging.info
# Writes the post-scenario console output: the Examples table header for a
# ScenarioOutline, the iterations line for a ScenarioLoop, or — when the
# scenario is a child row of an outline/loop — the colored example/iteration
# row, followed by the failure name/reason (and optional traceback) when the
# scenario failed.
# Kept verbatim: collapsed source, the table-formatting expressions are too
# position-sensitive to restyle with confidence.
def console_writer_after_each_scenario(self, scenario): """ If the scenario is a ExampleScenario it will write the Examples header :param Scenario scenario: the scenario which was ran. """ output = "" if isinstance(scenario, ScenarioOutline): output += "\n {0}:\n".format( colorful.bold_white(scenario.example_keyword) ) output += colorful.bold_white( " {0}| {1} |".format( self.get_id_padding(len(scenario.scenarios), offset=2), " | ".join( "{1: <{0}}".format(scenario.get_column_width(i), x) for i, x in enumerate(scenario.examples_header) ), ) ) elif isinstance(scenario, ScenarioLoop): output += "\n {0}: {1}".format( colorful.bold_white(scenario.iterations_keyword), colorful.cyan(scenario.iterations), ) elif isinstance(scenario.parent, ScenarioOutline): colored_pipe = colorful.bold_white("|") color_func = self.get_color_func(scenario.state) output += "{0} {1}{2} {3} {2}".format( self.get_line_jump_seq(), self.get_id_sentence_prefix( scenario, colorful.bold_cyan, len(scenario.parent.scenarios) ), colored_pipe, (" {0} ") .format(colored_pipe) .join( str( color_func( "{1: <{0}}".format(scenario.parent.get_column_width(i), x) ) ) for i, x in enumerate(scenario.example.data) ), ) if scenario.state == Step.State.FAILED: failed_step = scenario.failed_step if world.config.with_traceback: output += "\n {0}{1}".format( self.get_id_padding(len(scenario.parent.scenarios)), "\n ".join( [ str(colorful.red(l)) for l in failed_step.failure.traceback.split("\n")[:-2] ] ), ) output += "\n {0}{1}: {2}".format( self.get_id_padding(len(scenario.parent.scenarios)), colorful.bold_red(failed_step.failure.name), colorful.red(failed_step.failure.reason), ) elif isinstance(scenario.parent, ScenarioLoop): colored_pipe = colorful.bold_white("|") color_func = self.get_color_func(scenario.state) output += "{0} {1}{2} {3: <18} {2}".format( self.get_line_jump_seq(), self.get_id_sentence_prefix( scenario, colorful.bold_cyan, len(scenario.parent.scenarios) ), colored_pipe,
str(color_func(scenario.iteration)), ) if scenario.state == Step.State.FAILED: failed_step = scenario.failed_step if world.config.with_traceback: output += "\n {0}{1}".format( self.get_id_padding(len(scenario.parent.scenarios)), "\n ".join( [ str(colorful.red(l)) for l in failed_step.failure.traceback.split("\n")[:-2] ] ), ) output += "\n {0}{1}: {2}".format( self.get_id_padding(len(scenario.parent.scenarios)), colorful.bold_red(failed_step.failure.name), colorful.red(failed_step.failure.reason), ) if output: write(output)
# Second (reformatted, double-quoted) variant of test_step_matches_configs:
# same contract — exit codes 3 (absurd coverage target), 4 (no steps),
# 5 (empty config), 1 (failures), 2 (all passed but coverage below minimum),
# 0 (success); prints the colored sentence/coverage report and optionally the
# uncovered steps.
# NOTE(review): as in the other variant, `cover_min_percentage + "%"` expects
# a string argument — confirm the caller. Kept verbatim: collapsed source,
# restyling the report assembly risks changing output spacing.
def test_step_matches_configs( match_config_files, basedirs, cover_min_percentage=None, cover_show_missing=False ): """ Test if the given match config files matches the actual matched step implementations. """ if cover_min_percentage is not None and float(cover_min_percentage) > 100: sys.stderr.write( str( colorful.magenta( "You are a little cocky to think you can reach a minimum coverage of {0:.2f}%\n".format( float(cover_min_percentage) ) ) ) ) return 3 # load user's custom python files for basedir in basedirs: load_modules(basedir) steps = StepRegistry().steps if not steps: sys.stderr.write( str( colorful.magenta( "No step implementations found in {0}, thus doesn't make sense to continue".format( basedirs ) ) ) ) return 4 failed = 0 passed = 0 covered_steps = set() for match_config_file in match_config_files: # load the given match config file with codecs.open(match_config_file, "r", "utf-8") as f: match_config = yaml.safe_load(f) if not match_config: print( colorful.magenta( "No sentences found in {0} to test against".format( match_config_file ) ) ) return 5 print( colorful.yellow( "Testing sentences from {0}:".format( colorful.bold_yellow(match_config_file) ) ) ) failed_sentences, passed_senteces = test_step_matches(match_config, steps) failed += failed_sentences passed += passed_senteces covered_steps = covered_steps.union( x["should_match"] for x in match_config if "should_match" in x ) # newline sys.stdout.write("\n") report = colorful.bold_white("{0} sentences (".format(failed + passed)) if passed > 0: report += colorful.bold_green("{0} passed".format(passed)) if passed > 0 and failed > 0: report += colorful.bold_white(", ") if failed > 0: report += colorful.bold_red("{0} failed".format(failed)) report += colorful.bold_white(")") print(report) step_coverage = 100.0 / len(steps) * len(covered_steps) coverage_report = colorful.bold_white( "Covered {0} of {1} step implementations".format(len(covered_steps), len(steps)) ) ret = 0 if failed == 0 else 1 if
cover_min_percentage: coverage_color = ( colorful.bold_green if step_coverage >= float(cover_min_percentage) else colorful.bold_red ) coverage_report += colorful.bold_white(" (coverage: ") coverage_report += coverage_color("{0:.2f}%".format(step_coverage)) if float(cover_min_percentage) > step_coverage: coverage_report += colorful.bold_white( ", expected a minimum of {0}".format( colorful.bold_green(cover_min_percentage + "%") ) ) if failed == 0: ret = 2 # if tests have passed and coverage is too low we fail with exit code 2 coverage_report += colorful.bold_white(")") print(coverage_report) if cover_show_missing: missing_steps = get_missing_steps(steps, covered_steps) if missing_steps: missing_step_report = colorful.bold_yellow("Missing steps:\n") for step in missing_steps: missing_step_report += "- {0} at ".format(colorful.cyan(step[0])) missing_step_report += colorful.cyan(step[1]) + "\n" sys.stdout.write(missing_step_report) return ret
# Writes a step's sentence to the console after it ran: rewinds previously
# printed lines via get_line_jump_seq, highlights ScenarioOutline placeholders,
# appends any docstring text and data table, and on failure appends the
# failure name/reason (plus traceback when world.config.with_traceback).
# Kept verbatim: collapsed source, the jump-sequence and table width math is
# too layout-sensitive to restyle with confidence.
def console_writer_after_each_step(self, step): """ Writes the step to the console after it was run :param Step step: the step to write to the console """ if not isinstance(step.parent.parent, Feature): return color_func = self.get_color_func(step.state) line_jump_seq = self.get_line_jump_seq() * ( ((len(step.raw_text) + 3) if step.text else 1) + (len(step.table) + 1 if step.table_header else 0) ) output = "{0} ".format(line_jump_seq) if isinstance(step.parent, ScenarioOutline): # Highlight ScenarioOutline placeholders e.g. '<method>' output += "".join( str( colorful.white(item) if ( self._placeholder_regex.search(item) and item.strip("<>") in step.parent.examples_header ) else color_func(item) ) for item in self._placeholder_regex.split(step.sentence) ) else: output += "{0}{1}".format( self.get_id_sentence_prefix(step, colorful.bold_cyan), color_func(step.sentence), ) if step.text: id_padding = self.get_id_padding(len(step.parent.steps)) output += colorful.bold_white('\n {0}"""'.format(id_padding)) output += colorful.cyan( "".join( [ "\n {0}{1}".format(id_padding, l) for l in step.raw_text ] ) ) output += colorful.bold_white('\n {0}"""'.format(id_padding)) if step.table_header: colored_pipe = colorful.bold_white("|") col_widths = self.get_table_col_widths( [step.table_header] + step.table_data ) # output table header output += "\n {0} {1} {0}".format( colored_pipe, (" {0} ") .format(colored_pipe) .join( str(colorful.white("{1: <{0}}".format(col_widths[i], x))) for i, x in enumerate(step.table_header) ), ) # output table data for row in step.table_data: output += "\n {0} {1} {0}".format( colored_pipe, (" {0} ") .format(colored_pipe) .join( str(color_func("{1: <{0}}".format(col_widths[i], x))) for i, x in enumerate(row) ), ) if step.state == step.State.FAILED: if world.config.with_traceback: output += "\n {0}{1}".format( self.get_id_padding(len(step.parent.steps) - 2), "\n ".join( [ str(colorful.red(l)) for l in step.failure.traceback.split("\n")[:-2] ] ), ) output
+= "\n {0}{1}: {2}".format( self.get_id_padding(len(step.parent.steps) - 2), colorful.bold_red(step.failure.name), colorful.red(step.failure.reason), ) write(output)
def main():
    """
    Train (and periodically evaluate) a knowledge-grounded dialog model.

    Parses command-line/config hyper-parameters, configures GPUs and
    seeds, downloads the pretrained BERT archive when missing, builds
    the dataset reader, model, optimizer and trainer, then runs the
    epoch/step training loop with TensorBoard logging, checkpointing
    and periodic evaluation on the test split(s).
    """
    # Argument passing/parsing
    args, model_args = config_utils.initialize_argparser(
        MODELS, _command_args, custom_argparsers.DialogArgumentParser)
    hparams, hparams_dict = config_utils.create_or_load_hparams(
        args, model_args, args.cfg)
    pprint(hparams_dict)

    # Set environment variables & gpus
    set_logger()
    set_gpus(hparams.gpus)
    set_tcmalloc()
    gpus = tf.config.experimental.list_physical_devices('GPU')
    tf.config.experimental.set_visible_devices(gpus, 'GPU')
    for gpu in gpus:
        # allocate GPU memory on demand instead of grabbing it all up front
        tf.config.experimental.set_memory_growth(gpu, True)

    # Set random seed (TF, NumPy and stdlib random for reproducibility)
    tf.random.set_seed(hparams.random_seed)
    np.random.seed(hparams.random_seed)
    random.seed(hparams.random_seed)

    # For multi-gpu
    if hparams.num_gpus > 1:
        mirrored_strategy = tf.distribute.MirroredStrategy()  # NCCL will be used as default
    else:
        mirrored_strategy = None

    # Download BERT pretrained model
    # NOTE(review): download/unzip only happens when bert_dir is absent;
    # a partially-downloaded dir would be left as-is — confirm acceptable
    if not os.path.exists(hparams.bert_dir):
        os.makedirs(hparams.bert_dir)
        fname = 'uncased_L-12_H-768_A-12.zip'
        gd_id = '17rfV9CleFBwwfS7m5Yd72vvxdPLWBHl6'
        download_from_google_drive(gd_id, os.path.join(hparams.bert_dir, fname))
        unzip(hparams.bert_dir, fname)

    # Make dataset reader
    os.makedirs(hparams.cache_dir, exist_ok=True)
    if hparams.data_name == "wizard_of_wikipedia":
        reader_cls = WowDatasetReader
    elif hparams.data_name == "holle":
        reader_cls = HolleDatasetReader
    else:
        raise ValueError("data_name must be one of 'wizard_of_wikipedia' and 'holle'")
    reader = reader_cls(
        hparams.batch_size, hparams.num_epochs,
        buffer_size=hparams.buffer_size,
        bucket_width=hparams.bucket_width,
        max_length=hparams.max_length,
        max_episode_length=hparams.max_episode_length,
        max_knowledge=hparams.max_knowledge,
        knowledge_truncate=hparams.knowledge_truncate,
        cache_dir=hparams.cache_dir,
        bert_dir=hparams.bert_dir,
    )
    train_dataset, iters_in_train = reader.read('train', mirrored_strategy)
    test_dataset, iters_in_test = reader.read('test', mirrored_strategy)
    # the 'test_unseen' split exists only for wizard_of_wikipedia; all
    # later uses of unseen_dataset are guarded by the same condition
    if hparams.data_name == 'wizard_of_wikipedia':
        unseen_dataset, iters_in_unseen = reader.read('test_unseen', mirrored_strategy)
    vocabulary = reader.vocabulary

    # Build model & optimizer & trainer
    # (model/optimizer must be created under the strategy scope for multi-GPU)
    if mirrored_strategy:
        with mirrored_strategy.scope():
            model = MODELS[hparams.model](hparams, vocabulary)
            optimizer = tf.keras.optimizers.Adam(learning_rate=hparams.init_lr,
                                                 clipnorm=hparams.clipnorm)
    else:
        model = MODELS[hparams.model](hparams, vocabulary)
        optimizer = tf.keras.optimizers.Adam(learning_rate=hparams.init_lr,
                                             clipnorm=hparams.clipnorm)
    trainer = Trainer(model, optimizer, mirrored_strategy,
                      hparams.enable_function,
                      WowDatasetReader.remove_pad)

    # misc (tensorboard, checkpoints)
    file_writer = tf.summary.create_file_writer(hparams.checkpoint_dir)
    file_writer.set_as_default()
    global_step = tf.compat.v1.train.get_or_create_global_step()
    checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model,
                                     optimizer_step=global_step)
    checkpoint_manager = tf.train.CheckpointManager(checkpoint,
                                                    directory=hparams.checkpoint_dir,
                                                    max_to_keep=hparams.max_to_keep)
    checkpoint_tracker = CheckpointTracker(
        hparams.checkpoint_dir, max_to_keep=BEST_N_CHECKPOINTS)

    # Main loop!
    train_dataset_iter = iter(train_dataset)
    for epoch in range(hparams.num_epochs):
        print(hparams.checkpoint_dir)
        base_description = f"(Train) Epoch {epoch}, GPU {hparams.gpus}"
        train_tqdm = trange(iters_in_train, ncols=120, desc=base_description)
        for current_step in train_tqdm:
            example = next(train_dataset_iter)
            global_step.assign_add(1)
            _global_step = int(global_step)

            # Train
            output_dict = trainer.train_step(example)

            # Print model
            if _global_step == 1:
                model.print_model()

            loss_str = str(output_dict['loss'].numpy())
            train_tqdm.set_description(f"{base_description}, Loss {loss_str}")
            with file_writer.as_default():
                if _global_step % int(hparams.logging_step) == 0:
                    tf.summary.histogram('train/vocab', output_dict['sample_ids'], step=_global_step)
                    tf.summary.scalar('train/loss', output_dict['loss'], step=_global_step)
                    tf.summary.scalar('train/gen_loss', output_dict['gen_loss'], step=_global_step)
                    tf.summary.scalar('train/knowledge_loss', output_dict['knowledge_loss'], step=_global_step)
                    tf.summary.scalar('train/kl_loss', output_dict['kl_loss'], step=_global_step)

            # Test (runs every `evaluation_epoch` fraction of a training epoch)
            if _global_step % int(iters_in_train * hparams.evaluation_epoch) == 0:
                # NOTE(review): the positional arg of CheckpointManager.save()
                # is checkpoint_number — passing the global_step tensor labels
                # checkpoints by step; confirm intended
                checkpoint_manager.save(global_step)

                test_loop_outputs = trainer.test_loop(test_dataset, iters_in_test, epoch, 'seen')
                if hparams.data_name == 'wizard_of_wikipedia':
                    unseen_loop_outputs = trainer.test_loop(unseen_dataset, iters_in_unseen, epoch, 'unseen')

                test_summaries, log_dict = run_wow_evaluation(
                    test_loop_outputs, hparams.checkpoint_dir, 'seen')
                if hparams.data_name == 'wizard_of_wikipedia':
                    unseen_summaries, unseen_log_dict = run_wow_evaluation(
                        unseen_loop_outputs, hparams.checkpoint_dir, 'unseen')

                # Logging
                tqdm.write(colorful.bold_green("seen").styled_string)
                tqdm.write(colorful.bold_red(pformat(log_dict)).styled_string)
                if hparams.data_name == 'wizard_of_wikipedia':
                    tqdm.write(colorful.bold_green("unseen").styled_string)
                    tqdm.write(colorful.bold_red(pformat(unseen_log_dict)).styled_string)

                with file_writer.as_default():
                    for family, test_summary in test_summaries.items():
                        for key, value in test_summary.items():
                            tf.summary.scalar(f'{family}/{key}', value, step=_global_step)
                    if hparams.data_name == 'wizard_of_wikipedia':
                        for family, unseen_summary in unseen_summaries.items():
                            for key, value in unseen_summary.items():
                                tf.summary.scalar(f'{family}/{key}', value, step=_global_step)

                if hparams.keep_best_checkpoint:
                    current_score = log_dict["rouge1"]
                    checkpoint_tracker.update(current_score, _global_step)
def console_write(self, features, marker):
    """
    Writes the endreport for all features

    Tallies features, scenarios and steps by state, prints one colored
    summary line per category, lists pending step implementations,
    honors the --wip switch semantics and reports the total run time.

    :param list features: all features
    :param marker: the unique marker of this run (shown in the footer)
    """
    stats = {
        "features": {
            "amount": 0,
            "passed": 0,
            "failed": 0,
            "skipped": 0,
            "untested": 0,
            "pending": 0,
        },
        "scenarios": {
            "amount": 0,
            "passed": 0,
            "failed": 0,
            "skipped": 0,
            "untested": 0,
            "pending": 0,
        },
        "steps": {
            "amount": 0,
            "passed": 0,
            "failed": 0,
            "skipped": 0,
            "untested": 0,
            "pending": 0,
        },
    }

    pending_steps = []
    duration = timedelta()
    for feature in features:
        if not feature.has_to_run(world.config.scenarios):
            continue
        stats["features"]["amount"] += 1
        stats["features"][feature.state] += 1

        # only finished (passed/failed) features contribute to the run time
        if feature.state in [Step.State.PASSED, Step.State.FAILED]:
            duration += feature.duration

        for scenario in feature.all_scenarios:
            if not scenario.has_to_run(world.config.scenarios):
                continue

            if isinstance(scenario, ScenarioOutline):  # skip ScenarioOutlines
                continue
            if isinstance(scenario, ScenarioLoop):  # skip ScenarioLoop
                continue

            stats["scenarios"]["amount"] += 1
            stats["scenarios"][scenario.state] += 1
            for step in scenario.steps:
                stats["steps"]["amount"] += 1
                stats["steps"][step.state] += 1

                if step.state == Step.State.PENDING:
                    pending_steps.append(step)

    colored_closing_paren = colorful.bold_white(")")
    colored_comma = colorful.bold_white(", ")
    passed_word = colorful.bold_green("{0} passed")
    failed_word = colorful.bold_red("{0} failed")
    skipped_word = colorful.cyan("{0} skipped")
    pending_word = colorful.bold_yellow("{0} pending")

    def summary_line(noun, counts):
        # one "<N> <noun> (<x> passed[, ...])" fragment; failed/skipped/
        # pending counters are omitted when zero
        line = colorful.bold_white("{0} {1} (".format(counts["amount"], noun))
        line += passed_word.format(counts["passed"])
        if counts["failed"]:
            line += colored_comma + failed_word.format(counts["failed"])
        if counts["skipped"]:
            line += colored_comma + skipped_word.format(counts["skipped"])
        if counts["pending"]:
            line += colored_comma + pending_word.format(counts["pending"])
        return line + colored_closing_paren

    output = summary_line("features", stats["features"])
    output += "\n"
    output += summary_line("scenarios", stats["scenarios"])
    output += "\n"
    output += summary_line("steps", stats["steps"])

    if pending_steps:
        sr = StepRegistry()
        pending_step_implementations = make_unique_obj_list(
            pending_steps, lambda x: x.definition_func
        )
        # FIX: was `is not 1` (identity comparison against an int literal,
        # SyntaxWarning on CPython 3.8+ and implementation-dependent);
        # value comparison `!= 1` is the correct pluralization test
        output += colorful.white(
            "\nYou have {0} pending step implementation{1} affecting {2} step{3}:\n {4}\n\nNote: this could be the reason for some failing subsequent steps".format(
                len(pending_step_implementations),
                "s" if len(pending_step_implementations) != 1 else "",
                len(pending_steps),
                "s" if len(pending_steps) != 1 else "",
                "\n ".join(
                    [
                        "- '{0}' @ {1}".format(
                            sr.get_pattern(s.definition_func),
                            get_func_code(s.definition_func).co_filename,
                        )
                        for s in pending_step_implementations
                    ]
                ),
            )
        )

        output += "\n"

    if world.config.wip:
        if stats["scenarios"]["passed"] > 0:
            output += colorful.red(
                "\nThe --wip switch was used, so I didn't expect anything to pass. These scenarios passed:\n"
            )

            has_passed_scenarios = False
            for feature in features:
                passed_scenarios = list(
                    filter(
                        lambda s: s.state == Step.State.PASSED,
                        feature.all_scenarios,
                    )
                )
                for scenario in passed_scenarios:
                    output += colorful.red(
                        "\n - {}: {}".format(feature.path, scenario.sentence)
                    )
                    has_passed_scenarios = True

            if has_passed_scenarios:
                output += "\n"
        else:
            output += colorful.green(
                "\nThe --wip switch was used, so the failures were expected. All is good.\n"
            )

    output += colorful.cyan(
        "Run {0} finished within {1}".format(
            marker, humanize.naturaldelta(duration)
        )
    )

    write(output)
def main(args=None):
    """
    Entrypoint to radish.
    Setup up configuration, loads extensions, reads feature files and runs
    radish

    :param list args: command line arguments; defaults to sys.argv[1:]
    :return: 1 on parse/lookup errors, otherwise the dispatched command's
        return value
    """
    if args is None:
        args = sys.argv[1:]

    # note: using doc string for usage, messes up Sphinx documentation
    usage = """
Usage:
    radish show <features>
           [--expand]
           [--no-ansi]
    radish <features>...
           [-b=<basedir> | --basedir=<basedir>...]
           [-e | --early-exit]
           [--debug-steps]
           [-t | --with-traceback]
           [-m=<marker> | --marker=<marker>]
           [-p=<profile> | --profile=<profile>]
           [-d | --dry-run]
           [-s=<scenarios> | --scenarios=<scenarios>]
           [--shuffle]
           [--tags=<tags>]
           [--wip]
           [-f=<formatter> | --formatter=<formatter>]
           {0}
    radish (-h | --help)
    radish (-v | --version)

Arguments:
    features                                    feature files to run

Options:
    -h --help                                   show this screen
    -v --version                                show version
    -e --early-exit                             stop the run after the first failed step
    --debug-steps                               debugs each step
    -t --with-traceback                         show the Exception traceback when a step fails
    -m=<marker> --marker=<marker>               specify the marker for this run [default: time.time()]
    -p=<profile> --profile=<profile>            specify the profile which can be used in the step/hook implementation
    -b=<basedir> --basedir=<basedir>...         set base dir from where the step.py and terrain.py will be loaded. [default: $PWD/radish]
                                                You can specify -b|--basedir multiple times or split multiple paths with a colon (:) similar to $PATH. All files will be imported.
    -d --dry-run                                make dry run for the given feature files
    -s=<scenarios> --scenarios=<scenarios>      only run the specified scenarios (comma separated list)
    --shuffle                                   shuffle run order of features and scenarios
    --tags=<feature_tags>                       only run Scenarios with the given tags
    --wip                                       expects all tests to fail instead of succeeding
    -f=<formatter> --formatter=<formatter>      the output formatter which should be used. [default: gherkin]
    --expand                                    expand the feature file (all preconditions)
    {1}

(C) Copyright by Timo Furrer <*****@*****.**>
"""

    warnings.simplefilter("always", DeprecationWarning)

    # load extensions
    load_modules(os.path.join(os.path.dirname(__file__), "extensions"))

    extensions = ExtensionRegistry()
    # add arguments from extensions to the usage
    usage = usage.format(extensions.get_options(), extensions.get_option_description())

    sys.excepthook = catch_unhandled_exception

    # add version to the usage
    arguments = docopt(
        "radish {0}\n{1}".format(__VERSION__, usage), argv=args, version=__VERSION__
    )

    # store all arguments to configuration dict in terrain.world
    setup_config(arguments)

    # disable colors if necessary
    if world.config.no_ansi:
        colorful.disable()
    else:
        colorful.use_8_ansi_colors()

    # load needed extensions
    extensions.load(world.config)

    core = Core()

    if world.config.profile:
        msg = (
            "Command line argument -p/--profile will be removed in a future version. Please "
            "use -u/--user-data instead."
        )
        warnings.warn(msg, DeprecationWarning, stacklevel=1)

    feature_files = []
    for given_feature in world.config.features:
        if not os.path.exists(given_feature):
            raise FeatureFileNotFoundError(given_feature)

        # directories are searched recursively for *.feature files
        if os.path.isdir(given_feature):
            feature_files.extend(utils.recursive_glob(given_feature, "*.feature"))
            continue

        feature_files.append(given_feature)

    # parse tag expressions
    tag_expression = None
    if world.config.tags:
        tag_expression = tagexpressions.parse(world.config.tags)

    core.parse_features(feature_files, tag_expression)

    if not core.features or sum(len(f.scenarios) for f in core.features) == 0:
        utils.console_write(
            colorful.bold_red("Error: ")
            + colorful.red("No feature or no scenario specified in at least one of the given feature files")
        )
        if tag_expression:
            utils.console_write(
                colorful.red(
                    "You have specified a tag expression. Make sure those are valid and actually yield some Scenarios to run."
                )
            )
        return 1

    # first matching predicate wins; run_features is the fallback
    argument_dispatcher = [
        ((lambda: world.config.show), show_features),
        ((lambda: True), run_features),
    ]

    # radish command dispatching
    for to_run, method in argument_dispatcher:
        if to_run():
            return method(core)
def command_detection(search_command, commands_dir, rvt_ver, root_dir, project_code):
    """
    Searches command paths for register dict in __init__.py in command roots
    to prepare appropriate command strings to be inserted into the journal file
    :param search_command: command name to look up
    :param commands_dir: commands directory
    :param rvt_ver: rvt version
    :param root_dir: root directory used to build the warnings command path
    :param project_code: project code substituted into overridden journal commands
    :return: tuple of (command dict, post-process dict)
    """
    com_dict = defaultdict()
    post_proc_dict = defaultdict()
    found_dir = False
    for directory in os.scandir(commands_dir):
        command_name = directory.name
        # print(command_name)
        if search_command == command_name:
            found_dir = True
            # print(f" found appropriate command directory {op.join(commands_dir, command_name)}")
            if op.exists(f"{commands_dir}/{command_name}/__init__.py"):
                # dynamically import the command package from its __init__.py
                # NOTE(review): load_module() is deprecated in importlib —
                # consider exec_module; confirm before changing
                mod = machinery.SourceFileLoader(
                    command_name,
                    op.join(commands_dir, command_name, "__init__.py")).load_module()
            else:
                print(
                    colorful.bold_red(
                        f" appropriate __init__.py in command directory not found - aborting."
                    ))
                exit_with_log('__init__.py in command directory not found')
            # the module opts into features via keys of its `register` dict
            if "register" in dir(mod):
                if mod.register["name"] == command_name:
                    # print("command_name found!")
                    if "get_rps_button" in mod.register:
                        # print("needs rps button")
                        button_name = mod.register["get_rps_button"]
                        # NOTE(review): find_xml_command is called with an
                        # empty command name — presumably returns the root
                        # node to search for the button; confirm
                        rps_button = rps_xml.get_rps_button(
                            rps_xml.find_xml_command(rvt_ver, ""), button_name)
                        com_dict[command_name] = rps_button
                    if "override_jrn_template" in mod.register:
                        # mutates module-global template used by the journal writer
                        rvt_journal_writer.detach_rps_template = mod.register[
                            "override_jrn_template"]
                        # print("journal template overridden")
                    if "override_addin_template" in mod.register:
                        # mutates module-global addin template as well
                        rvt_journal_writer.rps_addin_template = mod.register[
                            "override_addin_template"]
                        # print("journal addin overridden")
                    if "override_jrn_command" in mod.register:
                        warnings_command_dir = op.join(root_dir, "warnings" + op.sep)
                        override_command = mod.register[
                            "override_jrn_command"].format(
                                warnings_command_dir, project_code)
                        # print(override_command)
                        com_dict[command_name] = override_command
                        # print("journal command overridden")
                    if "post_process" in mod.register:
                        # resolve post-process arg names against this module's globals
                        external_args = []
                        for arg in mod.register["post_process"]["args"]:
                            external_args.append(globals().get(arg))
                        post_proc_dict["func"] = mod.register["post_process"][
                            "func"]
                        post_proc_dict["args"] = external_args
            # fall back to a journal comment ("' ") when no command string
            # was registered for the matched directory
            if not com_dict:
                com_dict[command_name] = "' "
                # print("com_dict reset")
    if not found_dir:
        print(
            colorful.bold_red(
                f" appropriate command directory for '{search_command}' not found - aborting."
            ))
        exit_with_log('command directory not found')
    # print(com_dict)
    return com_dict, post_proc_dict