def run(cls):
    """Parse the syntribos CLI, build the test environment, and run every
    generated test case from each input file.

    Side effects: configures logging, prints progress to stdout, and
    stores the unittest result object in the module-global ``result``.
    Exits the process with status 0 on KeyboardInterrupt.
    """
    # ``result`` is deliberately module-global so code outside this
    # function can inspect the final test outcome after the run.
    global result
    # Suppress noisy urllib3 TLS warnings for the duration of the run.
    requests.packages.urllib3.disable_warnings()
    try:
        cls.print_symbol()
        usage = """
        syntribos <config> <input_file> --test-types=TEST_TYPES
        syntribos <config> <input_file> -t TEST_TYPE TEST_TYPE ...
        syntribos <config> <input_file>
        """
        args, unknown = syntribos.arguments.SyntribosCLI(
            usage=usage).parse_known_args()
        test_env_manager = TestEnvManager(
            "", args.config, test_repo_package_name="os")
        test_env_manager.finalize()
        cls.set_env()
        init_root_log_handler()
        cls.print_log()
        # Verbosity 2 gives per-test output; 1 gives dots.
        result = unittest.TextTestResult(
            unittest.runner._WritelnDecorator(sys.stdout), True,
            2 if args.verbose else 1)
        start_time = time.time()
        for file_path, req_str in args.input:
            for test_name, test_class in cls.get_tests(args.test_types):
                for test in test_class.get_test_cases(file_path, req_str):
                    cls.run_test(test, result, args.dry_run)
        cls.print_result(result, start_time)
    except KeyboardInterrupt:
        cafe.drivers.base.print_exception(
            "Runner", "run", "Keyboard Interrupt, exiting...")
        # Use sys.exit rather than the site-provided ``exit`` builtin,
        # which is absent when Python runs with -S.
        sys.exit(0)
def entry_point():
    """CLI entry point for ``behave-runner``.

    Parses <product> <config_file> [behave options], resolves the test
    path inside the product's test repo, and execs behave against it.
    Exits with behave's return code so CI can detect failures.
    """
    import sys

    # Set up arguments
    argparser = argparse.ArgumentParser(prog='behave-runner')
    argparser.add_argument(
        "product", nargs=1, metavar="<product>", help="Product name")
    argparser.add_argument(
        "config", nargs=1, metavar="<config_file>",
        help="Product test config")
    argparser.add_argument(
        dest='behave_opts', nargs=argparse.REMAINDER,
        metavar="<behave_opts>", help="Options to pass to Behave")
    args = argparser.parse_args()

    config = str(args.config[0])
    product = str(args.product[0])
    behave_opts = args.behave_opts

    test_env_manager = TestEnvManager(product, config)
    test_env_manager.finalize()
    product_root_test_path = os.path.join(
        test_env_manager.test_repo_path, product)

    # Attempt to use the first positional argument after the product
    # config as a sub-path of the test repo path; if it does not exist
    # there, raise.  (Was a stray string literal in the original.)
    if behave_opts and not behave_opts[0].startswith('-'):
        user_provided_path = behave_opts[0]
        attempted_sub_path = os.path.join(
            product_root_test_path,
            user_provided_path.lstrip(os.path.sep))
        if os.path.exists(attempted_sub_path):
            behave_opts[0] = attempted_sub_path
        else:
            raise Exception(
                "{directory} is not a sub-path in the {repo} repo.".format(
                    directory=behave_opts[0],
                    repo=test_env_manager.test_repo_package))
    else:
        behave_opts.insert(0, product_root_test_path)

    print_mug(behave_opts[0])
    behave_opts.insert(0, "behave")
    # Bug fix: previously always exited 0; propagate behave's status so
    # callers (CI) see test failures.
    sys.exit(subprocess.call(behave_opts))
def __init__(self):
    """Initialize the brew-file runner: parse args, finalize the test
    environment, set up logging, and build suites from the runfile.
    """
    self.print_mug()
    self.cl_args = ArgumentParser().parse_args()
    self.test_env = TestEnvManager(
        "", self.cl_args.config, test_repo_package_name="")
    # Bug fix: the original line was a no-op self-assignment.  Let a
    # CLI-provided data directory override the environment default,
    # matching the non-brew parallel runner; getattr keeps this safe if
    # the parser has no --data-directory option.
    self.test_env.test_data_directory = (
        getattr(self.cl_args, "data_directory", None) or
        self.test_env.test_data_directory)
    self.test_env.finalize()
    cclogging.init_root_log_handler()

    # This is where things diverge from the regular parallel runner:
    # extract the runfile contents instead of collecting test repos.
    self._log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self.datagen_start = time.time()
    self.run_file = BrewFile(self.cl_args.runfiles)

    # Log the runfile here so that it appears in the logs before any tests
    self._log.debug("\n" + str(self.run_file))

    # TODO: Once the parallel_runner is changed to a yielding model,
    # change this to yielding brews instead of generating a list
    self.suites = SuiteBuilder(
        testrepos=self.run_file.brew_modules(),
        dry_run=self.cl_args.dry_run,
        exit_on_error=True).get_suites()
    self.print_configuration(self.test_env, brewfile=self.run_file)
def __call__(self, parser, namespace, values, option_string=None):
    """Argparse action: print directory trees for products, configs,
    and/or tests, then exit the process with status 0.

    ``values`` is a (possibly empty) list drawn from
    {'products', 'configs', 'tests'}; with no values, what gets printed
    depends on which of --product/--config were supplied.
    """
    import sys

    product = namespace.product or ""
    test_env_mgr = TestEnvManager(
        product, None, test_repo_package_name=namespace.test_repo)
    test_dir = os.path.expanduser(
        os.path.join(test_env_mgr.test_repo_path, product))
    product_config_dir = os.path.expanduser(
        os.path.join(
            test_env_mgr.engine_config_interface.config_directory,
            product))

    def _print_test_tree():
        # Full tree of the product's test modules.
        print("\n<[TEST REPO]>\n")
        tree(test_dir, " ", print_files=True)

    def _print_config_tree():
        # Full tree of the product's config files.
        print("\n<[CONFIGS]>\n")
        tree(product_config_dir, " ", print_files=True)

    def _print_product_tree():
        # Directories only: one entry per product in the test repo.
        print("\n<[PRODUCTS]>\n")
        tree(test_env_mgr.test_repo_path, " ", print_files=False)

    def _print_product_list():
        # Flat listing of product directories under the config root.
        print("\n<[PRODUCTS]>\n")
        print("+-{0}".format(product_config_dir))
        print("\n".join([
            "  +-{0}/".format(dirname)
            for dirname in os.listdir(product_config_dir)]))

    # If no values passed, print a default
    if not values:
        if namespace.product and namespace.config:
            _print_test_tree()
        elif namespace.product and not namespace.config:
            _print_config_tree()
            _print_test_tree()
        elif not namespace.product and not namespace.config:
            _print_product_list()

    # Loop through values so that the trees get printed in the order
    # the values were passed on the command line
    for arg in values:
        if arg == 'products':
            _print_product_tree()
        if arg == 'configs':
            _print_config_tree()
        if arg == 'tests':
            _print_test_tree()

    # sys.exit instead of the site-provided ``exit`` builtin.
    sys.exit(0)
def pytest_configure(config):
    """Pytest hook: when both cafe options are supplied, finalize the
    cafe test environment, start root logging, and print the mug.
    """
    project = config.getoption('cafe_proj')
    config_name = config.getoption('cafe_config')
    if not (project and config_name):
        return

    # Setting test repo path variables to pass the checks that validate
    # whether the test repos exist.
    os.environ['CAFE_ALLOW_MANAGED_ENV_VAR_OVERRIDES'] = '1'
    os.environ['CAFE_TEST_REPO_PATH'] = config.args[0]

    env = TestEnvManager(
        project,
        config_name + '.config',
        test_repo_package_name=config.args[0])
    env.finalize()
    cclogging.init_root_log_handler()
    UnittestRunner.print_mug_and_paths(env)
def __call__(self, parser, namespace, values, option_string=None):
    """Argparse action: normalize the config name to end in '.config',
    verify the config file exists (exiting 1 if not), then store the
    value on the namespace.
    """
    import sys

    if values is not None:
        # Make sure user provided config name ends with '.config'
        if not str(values).endswith('.config'):
            values = "{0}{1}".format(values, ".config")
        test_env = TestEnvManager(namespace.product or "", values)
        if not os.path.exists(test_env.test_config_file_path):
            # Bug fix: error text now goes to stderr (it previously went
            # to stdout), and sys.exit replaces the ``exit`` builtin.
            print("cafe-runner: error: config file at {0} does not "
                  "exist".format(test_env.test_config_file_path),
                  file=sys.stderr)
            sys.exit(1)
    setattr(namespace, self.dest, values)
def __init__(self):
    """Parse the runner CLI, finalize the test environment, start root
    logging, and print the mug and environment paths.
    """
    self.cl_args = _UnittestRunnerCLI().get_cl_args()
    self.test_env = TestEnvManager(
        self.cl_args.product,
        self.cl_args.config,
        test_repo_package_name=self.cl_args.test_repo)

    # Anything in cl_args that overrides an environment default (such as
    # the data directory) must be applied before finalize() is called.
    # NOTE(review): the already-set environment value takes precedence
    # over the CLI --data-directory flag here — the opposite of the
    # parallel runner's precedence.  Confirm this is intentional.
    data_dir = self.test_env.test_data_directory
    if not data_dir:
        data_dir = self.cl_args.data_directory
    self.test_env.test_data_directory = data_dir

    self.test_env.finalize()
    init_root_log_handler()
    self.product = self.cl_args.product
    self.print_mug_and_paths(self.test_env)
def __init__(self):
    """Parse CLI args, finalize the test environment, initialize root
    logging, and build the test suites for the parallel runner.
    """
    self.print_mug()
    self.cl_args = ArgumentParser().parse_args()
    self.test_env = TestEnvManager(
        "", self.cl_args.config, test_repo_package_name="")

    # A CLI-supplied data directory wins over the environment default;
    # this must happen before finalize().
    requested_data_dir = self.cl_args.data_directory
    self.test_env.test_data_directory = (
        requested_data_dir if requested_data_dir
        else self.test_env.test_data_directory)

    self.test_env.finalize()
    cclogging.init_root_log_handler()
    self.print_configuration(self.test_env, self.cl_args.testrepos)

    repos = import_repos(self.cl_args.testrepos)
    self.cl_args.testrepos = repos
    self.suites = SuiteBuilder(
        testrepos=repos,
        tags=self.cl_args.tags,
        all_tags=self.cl_args.all_tags,
        regex_list=self.cl_args.regex_list,
        file_=self.cl_args.file,
        dry_run=self.cl_args.dry_run,
        exit_on_error=self.cl_args.exit_on_error).get_suites()
def entry_point():
    """CLI entry point for ``specter-runner``.

    Finalizes the product test environment, then invokes specter against
    the product's test path, forwarding any extra options.  Exits with
    specter's return code so CI can detect failures.
    """
    import sys

    # Setup and parse arguments
    arg_parser = argparse.ArgumentParser(prog='specter-runner')
    args = parse_runner_args(arg_parser)

    config = str(args.config[0]) + '.config'
    product = str(args.product[0])
    cmd_opts = args.cmd_opts

    test_env_manager = TestEnvManager(product, config)
    test_env_manager.finalize()

    test_path = os.path.join(test_env_manager.test_repo_path, product)
    call_args = ['specter', '--search', test_path, '--no-art']
    if cmd_opts:
        call_args.extend(cmd_opts)

    print_mug(name='Specter', brewing_from=test_path)
    # Bug fix: previously always exited 0; propagate specter's status.
    sys.exit(subprocess.call(call_args))