Example #1
def entry_point():

    # Set up arguments
    argparser = argparse.ArgumentParser(prog='cafe-cleaner')

    argparser.add_argument(
        "product",
        nargs=1,
        metavar="<product>",
        help="Product name")

    argparser.add_argument(
        "config",
        nargs=1,
        metavar="<config_file>",
        help="Product test config")

    args = argparser.parse_args()
    config = str(args.config[0])
    product = str(args.product[0])

    test_env_manager = TestEnvManager(product, config)
    test_env_manager.finalize()
    compute_cleanup()
    exit(0)
Example #2
File: runner.py Project: izrik/opencafe
def entry_point():

    # Set up arguments
    argparser = argparse.ArgumentParser(prog='behave-runner')

    argparser.add_argument(
        "product",
        nargs=1,
        metavar="<product>",
        help="Product name")

    argparser.add_argument(
        "config",
        nargs=1,
        metavar="<config_file>",
        help="Product test config")

    argparser.add_argument(
        dest='behave_opts',
        nargs=argparse.REMAINDER,
        metavar="<behave_opts>",
        help="Options to pass to Behave")

    args = argparser.parse_args()
    config = str(args.config[0])
    product = str(args.product[0])
    behave_opts = args.behave_opts

    test_env_manager = TestEnvManager(product, config)
    test_env_manager.finalize()

    product_root_test_path = os.path.join(
        test_env_manager.test_repo_path, product)

    """
    Attempts to use first positional argument after product config as a
    sub-path to the test repo path. If not a sub-path, raise exception.
    """
    if behave_opts and not behave_opts[0].startswith('-'):
        user_provided_path = behave_opts[0]
        attempted_sub_path = os.path.join(
            product_root_test_path,
            user_provided_path.lstrip(os.path.sep))

        if os.path.exists(attempted_sub_path):
            behave_opts[0] = attempted_sub_path
        else:
            raise Exception(
                "{directory} is not a sub-path in the {repo} repo.".format(
                    directory=behave_opts[0],
                    repo=test_env_manager.test_repo_package))
    else:
        behave_opts.insert(0, product_root_test_path)

    print_mug(behave_opts[0])
    behave_opts.insert(0, "behave")

    subprocess.call(behave_opts)

    exit(0)
Example #3
def entry_point():

    # Setup and parse arguments
    arg_parser = argparse.ArgumentParser(prog='specter-runner')
    args = parse_runner_args(arg_parser)

    config = str(args.config[0]) + '.config'
    product = str(args.product[0])
    cmd_opts = args.cmd_opts

    test_env_manager = TestEnvManager(product, config)
    test_env_manager.finalize()

    test_path = os.path.join(
        test_env_manager.test_repo_path, product)

    call_args = ['specter', '--search', test_path, '--no-art']

    if len(cmd_opts) > 0:
        call_args.extend(cmd_opts)

    print_mug(name='Specter', brewing_from=test_path)

    subprocess.call(call_args)
    exit(0)
Example #4
    def __init__(self):
        self.print_mug()
        self.cl_args = ArgumentParser().parse_args()
        self.test_env = TestEnvManager("",
                                       self.cl_args.config,
                                       test_repo_package_name="")
        self.test_env.test_data_directory = self.test_env.test_data_directory
        self.test_env.finalize()
        cclogging.init_root_log_handler()

        # This is where things diverge from the regular parallel runner
        # Extract the runfile contents
        self._log = cclogging.getLogger(
            cclogging.get_object_namespace(self.__class__))
        self.datagen_start = time.time()
        self.run_file = BrewFile(self.cl_args.runfiles)

        # Log the runfile here so that it appears in the logs before any tests
        self._log.debug("\n" + str(self.run_file))

        # TODO: Once the parallel_runner is changed to a yielding model,
        #       change this to yielding brews instead of generating a list
        self.suites = SuiteBuilder(testrepos=self.run_file.brew_modules(),
                                   dry_run=self.cl_args.dry_run,
                                   exit_on_error=True).get_suites()

        self.print_configuration(self.test_env, brewfile=self.run_file)
Example #5
    def run(cls):
        global result
        requests.packages.urllib3.disable_warnings()
        try:
            cls.print_symbol()
            usage = """
                syntribos <config> <input_file> --test-types=TEST_TYPES
                syntribos <config> <input_file> -t TEST_TYPE TEST_TYPE ...
                syntribos <config> <input_file>
                """
            args, unknown = syntribos.arguments.SyntribosCLI(
                usage=usage).parse_known_args()
            test_env_manager = TestEnvManager(
                "", args.config, test_repo_package_name="os")
            test_env_manager.finalize()
            cls.set_env()
            init_root_log_handler()

            cls.print_log()
            result = unittest.TextTestResult(
                unittest.runner._WritelnDecorator(sys.stdout),
                True, 2 if args.verbose else 1)
            start_time = time.time()
            for file_path, req_str in args.input:
                for test_name, test_class in cls.get_tests(args.test_types):
                    for test in test_class.get_test_cases(file_path, req_str):
                        cls.run_test(test, result, args.dry_run)
            cls.print_result(result, start_time)
        except KeyboardInterrupt:
            cafe.drivers.base.print_exception(
                "Runner",
                "run",
                "Keyboard Interrupt, exiting...")
            exit(0)
Example #6
def entry_point():

    # Set up arguments
    argparser = argparse.ArgumentParser(prog='cafe-build-all')

    argparser.add_argument(
        "product",
        metavar="<product>",
        help="Product name")

    argparser.add_argument(
        "config",
        metavar="<config_file>",
        help="Product test config")

    argparser.add_argument(
        "--image-filter",
        metavar="<image_filter>",
        help="")

    argparser.add_argument(
        "--flavor-filter",
        metavar="<flavor_filter>",
        help="")

    argparser.add_argument(
        "--key",
        metavar="<key>",
        help="The name of a existing Compute keypair. "
             "A keypair is required for OnMetal instances.")

    args = argparser.parse_args()
    config = args.config
    product = args.product
    key = args.key

    test_env_manager = TestEnvManager(product, config)
    test_env_manager.finalize()
    compute = ComputeComposite()

    image_filter = args.image_filter
    resp = compute.images.client.list_images()
    images = resp.entity

    filtered_images = filter(lambda i: image_filter in i.name, images)

    flavor_filter = args.flavor_filter
    resp = compute.flavors.client.list_flavors_with_detail()
    flavors = resp.entity

    filtered_flavors = filter(lambda f: flavor_filter in f.name, flavors)
    pairs = list(generate_image_flavor_pairs(filtered_images, filtered_flavors))
    builder(pairs, key)
    exit(0)
Example #7
def pytest_configure(config):

    if config.getoption('cafe_proj') and config.getoption('cafe_config'):
        # Setting test repo path variables to pass checks
        # to validate if the test repos exist
        os.environ['CAFE_ALLOW_MANAGED_ENV_VAR_OVERRIDES'] = '1'
        os.environ['CAFE_TEST_REPO_PATH'] = config.args[0]
        test_env = TestEnvManager(config.getoption('cafe_proj'),
                                  config.getoption('cafe_config') + '.config',
                                  test_repo_package_name=config.args[0])
        test_env.finalize()
        cclogging.init_root_log_handler()
        UnittestRunner.print_mug_and_paths(test_env)
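
The hook above reads two custom command-line options from pytest's config object. For orientation, a conftest.py has to register those options before pytest_configure can read them; the sketch below is a hypothetical registration, where the option spellings --cafe-proj and --cafe-config and their help text are assumptions inferred from the getoption() calls above rather than taken from the actual plugin.

# Hypothetical conftest.py companion to the pytest_configure hook above.
# Option names are assumed from config.getoption('cafe_proj') / ('cafe_config').
def pytest_addoption(parser):
    parser.addoption("--cafe-proj", action="store", default=None,
                     help="OpenCafe product name (assumed option spelling)")
    parser.addoption("--cafe-config", action="store", default=None,
                     help="OpenCafe test config, without the '.config' suffix")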
Example #8
    def __init__(self):
        self.print_mug()
        self.cl_args = ArgumentParser().parse_args()
        self.test_env = TestEnvManager(
            "", self.cl_args.config, test_repo_package_name="")
        self.test_env.test_data_directory = self.test_env.test_data_directory
        self.test_env.finalize()
        cclogging.init_root_log_handler()

        # This is where things diverge from the regular parallel runner
        # Extract the runfile contents
        self._log = cclogging.getLogger(
            cclogging.get_object_namespace(self.__class__))
        self.datagen_start = time.time()
        self.run_file = BrewFile(self.cl_args.runfiles)

        # Log the runfile here so that it appears in the logs before any tests
        self._log.debug("\n" + str(self.run_file))

        # TODO: Once the parallel_runner is changed to a yielding model,
        #       change this to yielding brews instead of generating a list
        self.suites = SuiteBuilder(
            testrepos=self.run_file.brew_modules(),
            dry_run=self.cl_args.dry_run,
            exit_on_error=True).get_suites()

        self.print_configuration(self.test_env, brewfile=self.run_file)
Example #9
    def __init__(self):
        self.cl_args = _UnittestRunnerCLI().get_cl_args()
        self.test_env = TestEnvManager(
            self.cl_args.product,
            self.cl_args.config,
            test_repo_package_name=self.cl_args.test_repo)

        # If something in the cl_args is supposed to override a default, like
        # say that data directory or something, it needs to happen before
        # finalize() is called
        self.test_env.test_data_directory = (self.test_env.test_data_directory
                                             or self.cl_args.data_directory)
        self.test_env.finalize()
        init_root_log_handler()
        self.product = self.cl_args.product
        self.print_mug_and_paths(self.test_env)
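
A recurring constraint in these runner examples is spelled out in the comment above: any override of a TestEnvManager default (the data directory, for instance) has to be applied before finalize() is called. The following is a minimal sketch of that sequence, distilled from the surrounding examples; the function name bootstrap and its parameters are illustrative, and only the TestEnvManager calls themselves come from the examples.

import os

from cafe.configurator.managers import TestEnvManager  # import path as in Example #19


def bootstrap(product, config, data_directory=None):
    """Illustrative helper mirroring the set-up order used by the runners above."""
    test_env = TestEnvManager(product, config)

    # Overrides must land before finalize(); mirrors the ordering in Example #9.
    test_env.test_data_directory = (
        test_env.test_data_directory or data_directory)

    test_env.finalize()  # Example #19 prints the CAFE_* environment variables after this

    # The per-product test path is derived from the finalized environment.
    return os.path.join(test_env.test_repo_path, product)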
Example #10
        def __call__(self, parser, namespace, values, option_string=None):

            product = namespace.product or ""
            test_env_mgr = TestEnvManager(
                product, None, test_repo_package_name=namespace.test_repo)
            test_dir = os.path.expanduser(
                os.path.join(test_env_mgr.test_repo_path, product))
            product_config_dir = os.path.expanduser(
                os.path.join(
                    test_env_mgr.engine_config_interface.config_directory,
                    product))

            def _print_test_tree():
                print("\n<[TEST REPO]>\n")
                tree(test_dir, " ", print_files=True)

            def _print_config_tree():
                print("\n<[CONFIGS]>\n")
                tree(product_config_dir, " ", print_files=True)

            def _print_product_tree():
                print("\n<[PRODUCTS]>\n")
                tree(test_env_mgr.test_repo_path, " ", print_files=False)

            def _print_product_list():
                print("\n<[PRODUCTS]>\n")
                print("+-{0}".format(product_config_dir))
                print("\n".join([
                    "  +-{0}/".format(dirname)
                    for dirname in os.listdir(product_config_dir)
                ]))

            # If no values passed, print a default
            if not values:
                if namespace.product and namespace.config:
                    _print_test_tree()
                elif namespace.product and not namespace.config:
                    _print_config_tree()
                    _print_test_tree()
                elif not namespace.product and not namespace.config:
                    _print_product_list()

            # Loop through values so that the trees get printed in the order
            # the values were passed on the command line
            for arg in values:
                if arg == 'products':
                    _print_product_tree()

                if arg == 'configs':
                    _print_config_tree()

                if arg == 'tests':
                    _print_test_tree()

            exit(0)
Example #11
    def __init__(self):
        self.print_mug()
        self.cl_args = ArgumentParser().parse_args()
        self.test_env = TestEnvManager("",
                                       self.cl_args.config,
                                       test_repo_package_name="")
        self.test_env.test_data_directory = (self.cl_args.data_directory or
                                             self.test_env.test_data_directory)
        self.test_env.finalize()
        cclogging.init_root_log_handler()
        self.print_configuration(self.test_env, self.cl_args.testrepos)
        self.cl_args.testrepos = import_repos(self.cl_args.testrepos)

        self.suites = SuiteBuilder(
            testrepos=self.cl_args.testrepos,
            tags=self.cl_args.tags,
            all_tags=self.cl_args.all_tags,
            regex_list=self.cl_args.regex_list,
            file_=self.cl_args.file,
            dry_run=self.cl_args.dry_run,
            exit_on_error=self.cl_args.exit_on_error).get_suites()
Example #12
        def __call__(self, parser, namespace, values, option_string=None):
            # Make sure user provided config name ends with '.config'
            if values is not None:
                if not str(values).endswith('.config'):
                    values = "{0}{1}".format(values, ".config")

                test_env = TestEnvManager(namespace.product or "", values)
                if not os.path.exists(test_env.test_config_file_path):
                    print("cafe-runner: error: config file at {0} does not "
                          "exist".format(test_env.test_config_file_path))
                    exit(1)

            setattr(namespace, self.dest, values)
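
The __call__ above belongs to a custom argparse.Action that normalizes the config name and validates it through TestEnvManager. As a self-contained illustration of how such an action plugs into a parser, here is a stripped-down version with the file-existence check removed; the class name ConfigAction and the cafe-runner wiring are illustrative, not the actual OpenCafe CLI code.

import argparse


class ConfigAction(argparse.Action):
    """Stripped-down stand-in for the __call__ shown above (no file check)."""
    def __call__(self, parser, namespace, values, option_string=None):
        if values is not None and not str(values).endswith('.config'):
            values = "{0}{1}".format(values, ".config")
        setattr(namespace, self.dest, values)


parser = argparse.ArgumentParser(prog='cafe-runner')
parser.add_argument("config", action=ConfigAction, help="Product test config")
print(parser.parse_args(["prod"]).config)  # -> prod.config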
Example #13
    def __init__(self):
        self.cl_args = _UnittestRunnerCLI().get_cl_args()
        self.test_env = TestEnvManager(
            self.cl_args.product, self.cl_args.config)

        # If something in the cl_args is supposed to override a default, like
        # say that data directory or something, it needs to happen before
        # finalize() is called
        self.test_env.test_data_directory = (
            self.test_env.test_data_directory or self.cl_args.data_directory)
        self.product_repo_path = os.path.join(
            self.test_env.test_repo_path, self.cl_args.product)
        self.test_env.finalize()
        self.print_mug_and_paths(self.test_env)
Example #14
File: runner.py Project: izrik/opencafe
def entry_point():

    # Setup and parse arguments
    arg_parser = argparse.ArgumentParser(prog='specter-runner')
    args = parse_runner_args(arg_parser)

    config = str(args.config[0]) + '.config'
    product = str(args.product[0])
    cmd_opts = args.cmd_opts

    test_env_manager = TestEnvManager(product, config)
    test_env_manager.finalize()

    test_path = os.path.join(test_env_manager.test_repo_path, product)

    call_args = ['specter', '--search', test_path, '--no-art']

    if len(cmd_opts) > 0:
        call_args.extend(cmd_opts)

    print_mug(name='Specter', brewing_from=test_path)

    subprocess.call(call_args)
    exit(0)
Example #15
    def __init__(self):
        self.cl_args = _UnittestRunnerCLI().get_cl_args()
        self.test_env = TestEnvManager(
            self.cl_args.product, self.cl_args.config)

        # If something in the cl_args is supposed to override a default, like
        # say that data directory or something, it needs to happen before
        # finalize() is called
        self.test_env.test_data_directory = (
            self.test_env.test_data_directory or self.cl_args.data_directory)
        self.test_env.finalize()
        init_root_log_handler()
        self.product = self.cl_args.product
        self.test_repo = (
            self.test_env.engine_config_interface.default_test_repo)
        self.print_mug_and_paths(self.test_env)
Example #16
File: runner.py Project: CafeHub/opencafe
    def __init__(self):
        self.cl_args = _UnittestRunnerCLI().get_cl_args()
        self.test_env = TestEnvManager(
            self.cl_args.product, self.cl_args.config,
            test_repo_package_name=self.cl_args.test_repo)

        # If something in the cl_args is supposed to override a default, like
        # say that data directory or something, it needs to happen before
        # finalize() is called
        self.test_env.test_data_directory = (
            self.test_env.test_data_directory or self.cl_args.data_directory)
        self.test_env.finalize()
        cclogging.init_root_log_handler()
        self._log = cclogging.getLogger(
            cclogging.get_object_namespace(self.__class__))
        self.product = self.cl_args.product
        self.print_mug_and_paths(self.test_env)
Example #17
    def __init__(self):
        self.print_mug()
        self.cl_args = ArgumentParser().parse_args()
        self.test_env = TestEnvManager("", self.cl_args.config, test_repo_package_name="")
        self.test_env.test_data_directory = self.cl_args.data_directory or self.test_env.test_data_directory
        self.test_env.finalize()
        cclogging.init_root_log_handler()
        self.print_configuration(self.test_env, self.cl_args.testrepos)
        self.cl_args.testrepos = import_repos(self.cl_args.testrepos)

        self.suites = SuiteBuilder(
            testrepos=self.cl_args.testrepos,
            tags=self.cl_args.tags,
            all_tags=self.cl_args.all_tags,
            regex_list=self.cl_args.regex_list,
            file_=self.cl_args.file,
            dry_run=self.cl_args.dry_run,
            exit_on_error=self.cl_args.exit_on_error,
        ).get_suites()
Example #18
class UnittestRunner(object):
    """OpenCafe UnittestRunner"""
    def __init__(self):
        self.print_mug()
        self.cl_args = ArgumentParser().parse_args()
        self.test_env = TestEnvManager(
            "", self.cl_args.config, test_repo_package_name="")
        self.test_env.test_data_directory = (
            self.cl_args.data_directory or self.test_env.test_data_directory)
        self.test_env.finalize()
        cclogging.init_root_log_handler()
        self.print_configuration(self.test_env, self.cl_args.testrepos)
        self.datagen_start = time.time()
        self.cl_args.testrepos = import_repos(self.cl_args.testrepos)

        self.suites = SuiteBuilder(
            testrepos=self.cl_args.testrepos,
            tags=self.cl_args.tags,
            all_tags=self.cl_args.all_tags,
            regex_list=self.cl_args.regex_list,
            file_=self.cl_args.file,
            dry_run=self.cl_args.dry_run,
            exit_on_error=self.cl_args.exit_on_error).get_suites()

    def run(self):
        """Starts the run of the tests"""
        results = []
        worker_list = []
        to_worker = Queue()
        from_worker = Queue()
        verbose = self.cl_args.verbose
        failfast = self.cl_args.failfast
        workers = int(not self.cl_args.parallel) or self.cl_args.workers

        for suite in self.suites:
            to_worker.put(suite)

        for _ in range(workers):
            to_worker.put(None)

        start = time.time()
        # A second try/except is needed here because queues can cause locking
        # when they go out of scope, especially when termination signals are used
        try:
            for _ in range(workers):
                proc = Consumer(to_worker, from_worker, verbose, failfast)
                worker_list.append(proc)
                proc.start()

            for _ in self.suites:
                results.append(self.log_result(from_worker.get()))

            end = time.time()
            tests_run, errors, failures = self.compile_results(
                run_time=end - start, datagen_time=start - self.datagen_start,
                results=results)

        except KeyboardInterrupt:
            print_exception("Runner", "run", "Keyboard Interrupt, exiting...")
            os.killpg(0, 9)
        return bool(sum([errors, failures, not tests_run]))

    @staticmethod
    def print_mug():
        """Prints the cafe mug"""
        print("""
    ( (
     ) )
  .........
  |       |___
  |       |_  |
  |  :-)  |_| |
  |       |___|
  |_______|
=== CAFE Runner ===""")

    @staticmethod
    def print_configuration(test_env, repos):
        """Prints the config/logs/repo/data_directory"""
        print("=" * 150)
        print("Percolated Configuration")
        print("-" * 150)
        if repos:
            print("BREWING FROM: ....: {0}".format(repos[0]))
            for repo in repos[1:]:
                print("{0}{1}".format(" " * 20, repo))
        print("ENGINE CONFIG FILE: {0}".format(test_env.engine_config_path))
        print("TEST CONFIG FILE..: {0}".format(test_env.test_config_file_path))
        print("DATA DIRECTORY....: {0}".format(test_env.test_data_directory))
        print("LOG PATH..........: {0}".format(test_env.test_log_dir))
        print("=" * 150)

    @staticmethod
    def log_result(dic):
        """Gets logs and stream from Comsumer and outputs to logs and stdout.
           Then is clears the stream and prints the errors to stream for later
           output.
        """
        handlers = logging.getLogger().handlers
        # handlers can be added here to allow for extensible log storage
        for record in dic.get("logs"):
            for handler in handlers:
                handler.emit(record)

        # this line can be replaced to add an extensible stdout/err location
        sys.stderr.write("{0}\n".format(dic["result"].stream.buf.strip()))
        sys.stderr.flush()
        dic["result"].stream.seek(0)
        dic["result"].stream.truncate()
        dic["result"].printErrors()
        dic["result"].stream.seek(0)
        return dic

    def compile_results(self, run_time, datagen_time, results):
        """Summarizes results and writes results to file if --result used"""
        all_results = []
        result_dict = {"tests": 0, "errors": 0, "failures": 0}
        for dic in results:
            result = dic["result"]
            tests = [suite for suite in self.suites
                     if suite.cafe_uuid == dic["cafe_uuid"]][0]
            result_parser = SummarizeResults(
                result_dict=vars(result), tests=tests, execution_time=run_time,
                datagen_time=datagen_time)
            all_results += result_parser.gather_results()
            summary = result_parser.summary_result()
            for key in result_dict:
                result_dict[key] += summary[key]

            if result.stream.buf.strip():
                # this line can be replaced to add an extensible stdout/err log
                sys.stderr.write("{0}\n\n".format(
                    result.stream.buf.strip()))

        if self.cl_args.result is not None:
            reporter = Reporter(result_parser, all_results)
            reporter.generate_report(
                self.cl_args.result, self.cl_args.result_directory)
        return self.print_results(
            run_time=run_time, datagen_time=datagen_time, **result_dict)

    def print_results(self, tests, errors, failures, run_time, datagen_time):
        """Prints results summerized in compile_results messages"""
        print("{0}".format("-" * 70))
        print("Ran {0} test{1} in {2:.3f}s".format(
            tests, "s" * bool(tests - 1), run_time))
        print("Generated datasets in {0:.3f}s".format(datagen_time))
        print("Total runtime {0:.3f}s".format(run_time + datagen_time))

        if failures or errors:
            print("\nFAILED ({0}{1}{2})".format(
                "failures={0}".format(failures) if failures else "",
                ", " if failures and errors else "",
                "errors={0}".format(errors) if errors else ""))
        print("{0}\nDetailed logs: {1}\n{2}".format(
            "=" * 150, self.test_env.test_log_dir, "-" * 150))
        return tests, errors, failures
Example #19
File: test4.py Project: rzurga/cafeclass
from cafe.configurator.managers import TestEnvManager
import os
import logging

root = logging.getLogger()
root.addHandler(logging.StreamHandler())
root.setLevel(0)

env = TestEnvManager("", "a.config", None, "os")

env.finalize()

for k, v in os.environ.items():
    if "CAFE" in k:
        print k, v
Example #20
File: runner.py Project: CafeHub/opencafe
class UnittestRunner(object):

    def __init__(self):
        self.cl_args = _UnittestRunnerCLI().get_cl_args()
        self.test_env = TestEnvManager(
            self.cl_args.product, self.cl_args.config,
            test_repo_package_name=self.cl_args.test_repo)

        # If something in the cl_args is supposed to override a default, like
        # say that data directory or something, it needs to happen before
        # finalize() is called
        self.test_env.test_data_directory = (
            self.test_env.test_data_directory or self.cl_args.data_directory)
        self.test_env.finalize()
        cclogging.init_root_log_handler()
        self._log = cclogging.getLogger(
            cclogging.get_object_namespace(self.__class__))
        self.product = self.cl_args.product
        self.print_mug_and_paths(self.test_env)

    @staticmethod
    def print_mug_and_paths(test_env):
        print("""
    ( (
     ) )
  .........
  |       |___
  |       |_  |
  |  :-)  |_| |
  |       |___|
  |_______|
=== CAFE Runner ===""")
        print("=" * 150)
        print("Percolated Configuration")
        print("-" * 150)
        print("BREWING FROM: ....: {0}".format(test_env.test_repo_path))
        print("ENGINE CONFIG FILE: {0}".format(test_env.engine_config_path))
        print("TEST CONFIG FILE..: {0}".format(test_env.test_config_file_path))
        print("DATA DIRECTORY....: {0}".format(test_env.test_data_directory))
        print("LOG PATH..........: {0}".format(test_env.test_log_dir))
        print("=" * 150)

    @staticmethod
    def execute_test(runner, test_id, test, results):
        result = runner.run(test)
        results.update({test_id: result})

    @staticmethod
    def get_runner(cl_args):
        test_runner = None

        # Use the parallel text runner so the console logs look correct
        if cl_args.parallel:
            test_runner = OpenCafeParallelTextTestRunner(
                verbosity=cl_args.verbose)
        else:
            test_runner = unittest.TextTestRunner(verbosity=cl_args.verbose)

        test_runner.failfast = cl_args.fail_fast
        return test_runner

    @staticmethod
    def dump_results(start, finish, results):
        print("-" * 71)

        tests_run = 0
        errors = 0
        failures = 0
        for key, result in list(results.items()):
            tests_run += result.testsRun
            errors += len(result.errors)
            failures += len(result.failures)

        print("Ran {0} test{1} in {2:.3f}s".format(
            tests_run, "s" if tests_run != 1 else "", finish - start))

        if failures or errors:
            print("\nFAILED ({0}{1}{2})".format(
                "Failures={0}".format(failures) if failures else "",
                " " if failures and errors else "",
                "Errors={0}".format(errors) if errors else ""))

        return errors, failures, tests_run

    def run(self):
        """
        loops through all the packages, modules, and methods sent in from
        the command line and runs them
        """
        master_suite = OpenCafeUnittestTestSuite()
        parallel_test_list = []
        test_count = 0

        builder = SuiteBuilder(self.cl_args, self.test_env.test_repo_package)
        test_runner = self.get_runner(self.cl_args)

        if self.cl_args.parallel:
            parallel_test_list = builder.generate_suite_list()
            test_count = len(parallel_test_list)
            if self.cl_args.dry_run:
                for suite in parallel_test_list:
                    for test in suite:
                        print(test)
                exit(0)
            exit_code = self.run_parallel(
                parallel_test_list, test_runner,
                result_type=self.cl_args.result,
                results_path=self.cl_args.result_directory)
        else:
            master_suite = builder.generate_suite()
            test_count = master_suite.countTestCases()
            if self.cl_args.dry_run:
                for test in master_suite:
                    print(test)
                exit(0)
            exit_code = self.run_serialized(
                master_suite, test_runner, result_type=self.cl_args.result,
                results_path=self.cl_args.result_directory)

        """
        Exit with a non-zero exit code if no tests were run, so that
        external monitoring programs (like Jenkins) can tell
        something is up
        """
        if test_count <= 0:
            exit_code = 1
        exit(exit_code)

    def run_parallel(
            self, test_suites, test_runner, result_type=None,
            results_path=None):

        exit_code = 0
        proc = None
        unittest.installHandler()
        processes = []
        manager = Manager()
        results = manager.dict()
        manager.dict()
        start = time.time()

        test_mapping = {}
        for test_suite in test_suites:
            # Give each test suite a UUID so it can be
            # matched to the correct test result
            test_id = str(uuid.uuid4())
            test_mapping[test_id] = test_suite

            proc = Process(
                target=self.execute_test,
                args=(test_runner, test_id, test_suite, results))
            processes.append(proc)
            proc.start()

        for proc in processes:
            proc.join()

        finish = time.time()

        errors, failures, _ = self.dump_results(start, finish, results)

        if result_type is not None:
            all_results = []
            for test_id, result in list(results.items()):
                tests = test_mapping[test_id]
                result_parser = SummarizeResults(
                    vars(result), tests, (finish - start))
                all_results += result_parser.gather_results()

            reporter = Reporter(
                result_parser=result_parser, all_results=all_results)
            reporter.generate_report(
                result_type=result_type, path=results_path)

        if failures or errors:
            exit_code = 1

        return exit_code

    def run_serialized(
            self, master_suite, test_runner, result_type=None,
            results_path=None):

        exit_code = 0
        unittest.installHandler()
        start_time = time.time()
        result = test_runner.run(master_suite)
        total_execution_time = time.time() - start_time

        if result_type is not None:
            result_parser = SummarizeResults(
                vars(result), master_suite, total_execution_time)
            all_results = result_parser.gather_results()
            reporter = Reporter(
                result_parser=result_parser, all_results=all_results)
            reporter.generate_report(
                result_type=result_type, path=results_path)

        self._log_results(result)
        if not result.wasSuccessful():
            exit_code = 1

        return exit_code

    def _log_results(self, result):
        """Replicates the printing functionality of unittest's runner.run() but
        logs instead of prints
        """

        infos = []
        expected_fails = unexpected_successes = skipped = 0

        try:
            results = list(map(len, (
                result.expectedFailures, result.unexpectedSuccesses,
                result.skipped)))
            expected_fails, unexpected_successes, skipped = results
        except AttributeError:
            pass

        if not result.wasSuccessful():
            failed, errored = list(map(len, (result.failures, result.errors)))

            if failed:
                infos.append("failures={0}".format(failed))
            if errored:
                infos.append("errors={0}".format(errored))

            self.log_errors('ERROR', result, result.errors)
            self.log_errors('FAIL', result, result.failures)
            self._log.info("Ran {0} Tests".format(result.testsRun))
            self._log.info('FAILED ')
        else:
            self._log.info("Ran {0} Tests".format(result.testsRun))
            self._log.info("Passing all tests")

        if skipped:
            infos.append("skipped={0}".format(str(skipped)))
        if expected_fails:
            infos.append("expected failures={0}".format(expected_fails))
        if unexpected_successes:
            infos.append("unexpected successes={0}".format(
                str(unexpected_successes)))
        if infos:
            self._log.info(" ({0})\n".format((", ".join(infos),)))
        else:
            self._log.info("\n")

        print('=' * 150)
        print("Detailed logs: {0}".format(os.getenv("CAFE_TEST_LOG_PATH")))
        print('-' * 150)

    def log_errors(self, label, result, errors):
        border1 = '=' * 45
        border2 = '-' * 45

        for test, err in errors:
            msg = "{0}: {1}\n".format(label, result.getDescription(test))
            self._log.info(
                "{0}\n{1}\n{2}\n{3}".format(border1, msg, border2, err))
Example #21
class BrewRunner(UnittestRunner):
    """OpenCafe BrewFile Runner"""
    def __init__(self):
        self.print_mug()
        self.cl_args = ArgumentParser().parse_args()
        self.test_env = TestEnvManager("",
                                       self.cl_args.config,
                                       test_repo_package_name="")
        self.test_env.test_data_directory = self.test_env.test_data_directory
        self.test_env.finalize()
        cclogging.init_root_log_handler()

        # This is where things diverge from the regular parallel runner
        # Extract the runfile contents
        self._log = cclogging.getLogger(
            cclogging.get_object_namespace(self.__class__))
        self.datagen_start = time.time()
        self.run_file = BrewFile(self.cl_args.runfiles)

        # Log the runfile here so that it appears in the logs before any tests
        self._log.debug("\n" + str(self.run_file))

        # TODO: Once the parallel_runner is changed to a yielding model,
        #       change this to yielding brews instead of generating a list
        self.suites = SuiteBuilder(testrepos=self.run_file.brew_modules(),
                                   dry_run=self.cl_args.dry_run,
                                   exit_on_error=True).get_suites()

        self.print_configuration(self.test_env, brewfile=self.run_file)

    def print_configuration(self, test_env, repos=None, brewfile=None):
        """Prints the config/logs/repo/data_directory/brewfiles"""
        print("=" * 150)
        print("Percolated Configuration")
        print("-" * 150)
        if brewfile:
            print("BREW FILES........:")
            print("\t\t" + "\n\t\t    ".join(brewfile.files))
            if self.cl_args.verbose >= 2:
                print("BREWS............:")
                print("\t" + "\n\t".join(brewfile.brews_to_strings()))
        if repos:
            print("BREWING FROM: ....: {0}".format(repos[0]))
            for repo in repos[1:]:
                print("{0}{1}".format(" " * 20, repo))
        print("ENGINE CONFIG FILE: {0}".format(test_env.engine_config_path))
        print("TEST CONFIG FILE..: {0}".format(test_env.test_config_file_path))
        print("DATA DIRECTORY....: {0}".format(test_env.test_data_directory))
        print("LOG PATH..........: {0}".format(test_env.test_log_dir))
        print("=" * 150)

    @staticmethod
    def print_mug():
        """Prints the cafe 'mug'"""
        print("""
        /~~~~~~~~~~~~~~~~~~~~~~~/|
       /              /######/ / |
      /              /______/ /  |
     ========================= /||
     |_______________________|/ ||
      |  \****/     \__,,__/    ||
      |===\**/       __,,__     ||
      |______________\====/%____||
      |   ___        /~~~~\ %  / |
     _|  |===|===   /      \%_/  |
    | |  |###|     |########| | /
    |____\###/______\######/__|/
    ~~~~~~~~~~~~~~~~~~~~~~~~~~
===      CAFE Brewfile Runner      ===""")
Example #22
class UnittestRunner(object):
    def __init__(self):
        self.cl_args = _UnittestRunnerCLI().get_cl_args()
        self.test_env = TestEnvManager(
            self.cl_args.product,
            self.cl_args.config,
            test_repo_package_name=self.cl_args.test_repo)

        # If something in the cl_args is supposed to override a default, like
        # say that data directory or something, it needs to happen before
        # finalize() is called
        self.test_env.test_data_directory = (self.test_env.test_data_directory
                                             or self.cl_args.data_directory)
        self.test_env.finalize()
        cclogging.init_root_log_handler()
        self._log = cclogging.getLogger(
            cclogging.get_object_namespace(self.__class__))
        self.product = self.cl_args.product
        self.print_mug_and_paths(self.test_env)

    @staticmethod
    def print_mug_and_paths(test_env):
        print("""
    ( (
     ) )
  .........
  |       |___
  |       |_  |
  |  :-)  |_| |
  |       |___|
  |_______|
=== CAFE Runner ===""")
        print("=" * 150)
        print("Percolated Configuration")
        print("-" * 150)
        print("BREWING FROM: ....: {0}".format(test_env.test_repo_path))
        print("ENGINE CONFIG FILE: {0}".format(test_env.engine_config_path))
        print("TEST CONFIG FILE..: {0}".format(test_env.test_config_file_path))
        print("DATA DIRECTORY....: {0}".format(test_env.test_data_directory))
        print("LOG PATH..........: {0}".format(test_env.test_log_dir))
        print("=" * 150)

    @staticmethod
    def execute_test(runner, test_id, test, results):
        result = runner.run(test)
        results.update({test_id: result})

    @staticmethod
    def get_runner(cl_args):
        test_runner = None

        # Use the parallel text runner so the console logs look correct
        if cl_args.parallel:
            test_runner = OpenCafeParallelTextTestRunner(
                verbosity=cl_args.verbose)
        else:
            test_runner = unittest.TextTestRunner(verbosity=cl_args.verbose)

        test_runner.failfast = cl_args.fail_fast
        return test_runner

    @staticmethod
    def dump_results(start, finish, results):
        print("-" * 71)

        tests_run = 0
        errors = 0
        failures = 0
        for key, result in list(results.items()):
            tests_run += result.testsRun
            errors += len(result.errors)
            failures += len(result.failures)

        print("Ran {0} test{1} in {2:.3f}s".format(
            tests_run, "s" if tests_run != 1 else "", finish - start))

        if failures or errors:
            print("\nFAILED ({0}{1}{2})".format(
                "Failures={0}".format(failures) if failures else "",
                " " if failures and errors else "",
                "Errors={0}".format(errors) if errors else ""))

        return errors, failures, tests_run

    def run(self):
        """
        loops through all the packages, modules, and methods sent in from
        the command line and runs them
        """
        master_suite = OpenCafeUnittestTestSuite()
        parallel_test_list = []
        test_count = 0

        builder = SuiteBuilder(self.cl_args, self.test_env.test_repo_package)
        test_runner = self.get_runner(self.cl_args)

        if self.cl_args.parallel:
            parallel_test_list = builder.generate_suite_list()
            test_count = len(parallel_test_list)
            if self.cl_args.dry_run:
                for suite in parallel_test_list:
                    for test in suite:
                        print(test)
                exit(0)
            exit_code = self.run_parallel(
                parallel_test_list,
                test_runner,
                result_type=self.cl_args.result,
                results_path=self.cl_args.result_directory)
        else:
            master_suite = builder.generate_suite()
            test_count = master_suite.countTestCases()
            if self.cl_args.dry_run:
                for test in master_suite:
                    print(test)
                exit(0)
            exit_code = self.run_serialized(
                master_suite,
                test_runner,
                result_type=self.cl_args.result,
                results_path=self.cl_args.result_directory)
        """
        Exit with a non-zero exit code if no tests were run, so that
        external monitoring programs (like Jenkins) can tell
        something is up
        """
        if test_count <= 0:
            exit_code = 1
        exit(exit_code)

    def run_parallel(self,
                     test_suites,
                     test_runner,
                     result_type=None,
                     results_path=None):

        exit_code = 0
        proc = None
        unittest.installHandler()
        processes = []
        manager = Manager()
        results = manager.dict()
        manager.dict()
        start = time.time()

        test_mapping = {}
        for test_suite in test_suites:
            # Give each test suite a UUID so it can be
            # matched to the correct test result
            test_id = str(uuid.uuid4())
            test_mapping[test_id] = test_suite

            proc = Process(target=self.execute_test,
                           args=(test_runner, test_id, test_suite, results))
            processes.append(proc)
            proc.start()

        for proc in processes:
            proc.join()

        finish = time.time()

        errors, failures, _ = self.dump_results(start, finish, results)

        if result_type is not None:
            all_results = []
            for test_id, result in list(results.items()):
                tests = test_mapping[test_id]
                result_parser = SummarizeResults(vars(result), tests,
                                                 (finish - start))
                all_results += result_parser.gather_results()

            reporter = Reporter(result_parser=result_parser,
                                all_results=all_results)
            reporter.generate_report(result_type=result_type,
                                     path=results_path)

        if failures or errors:
            exit_code = 1

        return exit_code

    def run_serialized(self,
                       master_suite,
                       test_runner,
                       result_type=None,
                       results_path=None):

        exit_code = 0
        unittest.installHandler()
        start_time = time.time()
        result = test_runner.run(master_suite)
        total_execution_time = time.time() - start_time

        if result_type is not None:
            result_parser = SummarizeResults(vars(result), master_suite,
                                             total_execution_time)
            all_results = result_parser.gather_results()
            reporter = Reporter(result_parser=result_parser,
                                all_results=all_results)
            reporter.generate_report(result_type=result_type,
                                     path=results_path)

        self._log_results(result)
        if not result.wasSuccessful():
            exit_code = 1

        return exit_code

    def _log_results(self, result):
        """Replicates the printing functionality of unittest's runner.run() but
        logs instead of prints
        """

        infos = []
        expected_fails = unexpected_successes = skipped = 0

        try:
            results = list(
                map(len, (result.expectedFailures, result.unexpectedSuccesses,
                          result.skipped)))
            expected_fails, unexpected_successes, skipped = results
        except AttributeError:
            pass

        if not result.wasSuccessful():
            failed, errored = list(map(len, (result.failures, result.errors)))

            if failed:
                infos.append("failures={0}".format(failed))
            if errored:
                infos.append("errors={0}".format(errored))

            self.log_errors('ERROR', result, result.errors)
            self.log_errors('FAIL', result, result.failures)
            self._log.info("Ran {0} Tests".format(result.testsRun))
            self._log.info('FAILED ')
        else:
            self._log.info("Ran {0} Tests".format(result.testsRun))
            self._log.info("Passing all tests")

        if skipped:
            infos.append("skipped={0}".format(str(skipped)))
        if expected_fails:
            infos.append("expected failures={0}".format(expected_fails))
        if unexpected_successes:
            infos.append("unexpected successes={0}".format(
                str(unexpected_successes)))
        if infos:
            self._log.info(" ({0})\n".format((", ".join(infos), )))
        else:
            self._log.info("\n")

        print('=' * 150)
        print("Detailed logs: {0}".format(os.getenv("CAFE_TEST_LOG_PATH")))
        print('-' * 150)

    def log_errors(self, label, result, errors):
        border1 = '=' * 45
        border2 = '-' * 45

        for test, err in errors:
            msg = "{0}: {1}\n".format(label, result.getDescription(test))
            self._log.info("{0}\n{1}\n{2}\n{3}".format(border1, msg, border2,
                                                       err))
Example #23
class UnittestRunner(object):
    def __init__(self):
        self.cl_args = _UnittestRunnerCLI().get_cl_args()
        self.test_env = TestEnvManager(
            self.cl_args.product,
            self.cl_args.config,
            test_repo_package_name=self.cl_args.test_repo)

        # If something in the cl_args is supposed to override a default, like
        # say that data directory or something, it needs to happen before
        # finalize() is called
        self.test_env.test_data_directory = (self.test_env.test_data_directory
                                             or self.cl_args.data_directory)
        self.test_env.finalize()
        init_root_log_handler()
        self.product = self.cl_args.product
        self.print_mug_and_paths(self.test_env)

    @staticmethod
    def print_mug_and_paths(test_env):
        print """
    ( (
     ) )
  .........
  |       |___
  |       |_  |
  |  :-)  |_| |
  |       |___|
  |_______|
=== CAFE Runner ==="""
        print "=" * 150
        print "Percolated Configuration"
        print "-" * 150
        print "BREWING FROM: ....: {0}".format(test_env.test_repo_path)
        print "ENGINE CONFIG FILE: {0}".format(test_env.engine_config_path)
        print "TEST CONFIG FILE..: {0}".format(test_env.test_config_file_path)
        print "DATA DIRECTORY....: {0}".format(test_env.test_data_directory)
        print "LOG PATH..........: {0}".format(test_env.test_log_dir)
        print "=" * 150

    @staticmethod
    def execute_test(runner, test_id, test, results):
        result = runner.run(test)
        results.update({test_id: result})

    @staticmethod
    def get_runner(cl_args):
        test_runner = None

        # Use the parallel text runner so the console logs look correct
        if cl_args.parallel:
            test_runner = OpenCafeParallelTextTestRunner(
                verbosity=cl_args.verbose)
        else:
            test_runner = unittest.TextTestRunner(verbosity=cl_args.verbose)

        test_runner.failfast = cl_args.fail_fast
        return test_runner

    @staticmethod
    def dump_results(start, finish, results):
        print "-" * 71

        tests_run = 0
        errors = 0
        failures = 0
        for key, result in results.items():
            tests_run += result.testsRun
            errors += len(result.errors)
            failures += len(result.failures)

        print "Ran {0} test{1} in {2:.3f}s".format(
            tests_run, "s" if tests_run != 1 else "", finish - start)

        if failures or errors:
            print "\nFAILED ({0}{1}{2})".format(
                "Failures={0}".format(failures) if failures else "",
                " " if failures and errors else "",
                "Errors={0}".format(errors) if errors else "")

        return errors, failures, tests_run

    def run(self):
        """
        loops through all the packages, modules, and methods sent in from
        the command line and runs them
        """
        master_suite = OpenCafeUnittestTestSuite()
        parallel_test_list = []

        builder = SuiteBuilder(self.cl_args, self.test_env.test_repo_package)
        test_runner = self.get_runner(self.cl_args)

        if self.cl_args.parallel:
            parallel_test_list = builder.generate_suite_list()
            if self.cl_args.dry_run:
                for suite in parallel_test_list:
                    for test in suite:
                        print test
                exit(0)
            exit_code = self.run_parallel(
                parallel_test_list,
                test_runner,
                result_type=self.cl_args.result,
                results_path=self.cl_args.result_directory)
            exit(exit_code)
        else:
            master_suite = builder.generate_suite()
            if self.cl_args.dry_run:
                for test in master_suite:
                    print test
                exit(0)
            exit_code = self.run_serialized(
                master_suite,
                test_runner,
                result_type=self.cl_args.result,
                results_path=self.cl_args.result_directory)

            exit(exit_code)

    def run_parallel(self,
                     test_suites,
                     test_runner,
                     result_type=None,
                     results_path=None):

        exit_code = 0
        proc = None
        unittest.installHandler()
        processes = []
        manager = Manager()
        results = manager.dict()
        manager.dict()
        start = time.time()

        test_mapping = {}
        for test_suite in test_suites:
            # Give each test suite a UUID so it can be
            # matched to the correct test result
            test_id = str(uuid.uuid4())
            test_mapping[test_id] = test_suite

            proc = Process(target=self.execute_test,
                           args=(test_runner, test_id, test_suite, results))
            processes.append(proc)
            proc.start()

        for proc in processes:
            proc.join()

        finish = time.time()

        errors, failures, _ = self.dump_results(start, finish, results)

        if result_type is not None:
            all_results = []
            for test_id, result in results.items():
                tests = test_mapping[test_id]
                result_parser = SummarizeResults(vars(result), tests,
                                                 (finish - start))
                all_results += result_parser.gather_results()

            reporter = Reporter(result_parser=result_parser,
                                all_results=all_results)
            reporter.generate_report(result_type=result_type,
                                     path=results_path)

        if failures or errors:
            exit_code = 1

        return exit_code

    def run_serialized(self,
                       master_suite,
                       test_runner,
                       result_type=None,
                       results_path=None):

        exit_code = 0
        unittest.installHandler()
        start_time = time.time()
        result = test_runner.run(master_suite)
        total_execution_time = time.time() - start_time

        if result_type is not None:
            result_parser = SummarizeResults(vars(result), master_suite,
                                             total_execution_time)
            all_results = result_parser.gather_results()
            reporter = Reporter(result_parser=result_parser,
                                all_results=all_results)
            reporter.generate_report(result_type=result_type,
                                     path=results_path)

        log_results(result)
        if not result.wasSuccessful():
            exit_code = 1

        return exit_code
Example #24
class UnittestRunner(object):
    """OpenCafe UnittestRunner"""
    def __init__(self):
        self.print_mug()
        self.cl_args = ArgumentParser().parse_args()
        self.test_env = TestEnvManager("",
                                       self.cl_args.config,
                                       test_repo_package_name="")
        self.test_env.test_data_directory = (self.cl_args.data_directory or
                                             self.test_env.test_data_directory)
        self.test_env.finalize()
        cclogging.init_root_log_handler()
        self.print_configuration(self.test_env, self.cl_args.testrepos)
        self.datagen_start = time.time()
        self.cl_args.testrepos = import_repos(self.cl_args.testrepos)

        self.suites = SuiteBuilder(
            testrepos=self.cl_args.testrepos,
            tags=self.cl_args.tags,
            all_tags=self.cl_args.all_tags,
            regex_list=self.cl_args.regex_list,
            file_=self.cl_args.file,
            dry_run=self.cl_args.dry_run,
            exit_on_error=self.cl_args.exit_on_error).get_suites()

    def run(self):
        """Starts the run of the tests"""
        results = []
        worker_list = []
        to_worker = Queue()
        from_worker = Queue()
        verbose = self.cl_args.verbose
        failfast = self.cl_args.failfast
        workers = int(not self.cl_args.parallel) or self.cl_args.workers

        for suite in self.suites:
            to_worker.put(suite)

        for _ in range(workers):
            to_worker.put(None)

        start = time.time()
        # A second try/except is needed here because queues can cause locking
        # when they go out of scope, especially when termination signals are used
        try:
            for _ in range(workers):
                proc = Consumer(to_worker, from_worker, verbose, failfast)
                worker_list.append(proc)
                proc.start()

            for _ in self.suites:
                results.append(self.log_result(from_worker.get()))

            end = time.time()
            tests_run, errors, failures = self.compile_results(
                run_time=end - start,
                datagen_time=start - self.datagen_start,
                results=results)

        except KeyboardInterrupt:
            print_exception("Runner", "run", "Keyboard Interrupt, exiting...")
            os.killpg(0, 9)
        return bool(sum([errors, failures, not tests_run]))

    @staticmethod
    def print_mug():
        """Prints the cafe mug"""
        print("""
    ( (
     ) )
  .........
  |       |___
  |       |_  |
  |  :-)  |_| |
  |       |___|
  |_______|
=== CAFE Runner ===""")

    @staticmethod
    def print_configuration(test_env, repos):
        """Prints the config/logs/repo/data_directory"""
        print("=" * 150)
        print("Percolated Configuration")
        print("-" * 150)
        if repos:
            print("BREWING FROM: ....: {0}".format(repos[0]))
            for repo in repos[1:]:
                print("{0}{1}".format(" " * 20, repo))
        print("ENGINE CONFIG FILE: {0}".format(test_env.engine_config_path))
        print("TEST CONFIG FILE..: {0}".format(test_env.test_config_file_path))
        print("DATA DIRECTORY....: {0}".format(test_env.test_data_directory))
        print("LOG PATH..........: {0}".format(test_env.test_log_dir))
        print("=" * 150)

    @staticmethod
    def log_result(dic):
        """Gets logs and stream from Comsumer and outputs to logs and stdout.
           Then is clears the stream and prints the errors to stream for later
           output.
        """
        handlers = logging.getLogger().handlers
        # handlers can be added here to allow for extensible log storage
        for record in dic.get("logs"):
            for handler in handlers:
                handler.emit(record)

        # this line can be replaced to add an extensible stdout/err location
        sys.stderr.write("{0}\n".format(dic["result"].stream.buf.strip()))
        sys.stderr.flush()
        dic["result"].stream.seek(0)
        dic["result"].stream.truncate()
        dic["result"].printErrors()
        dic["result"].stream.seek(0)
        return dic

    def compile_results(self, run_time, datagen_time, results):
        """Summarizes results and writes results to file if --result used"""
        all_results = []
        result_dict = {"tests": 0, "errors": 0, "failures": 0, "skipped": 0}
        for dic in results:
            result = dic["result"]
            tests = [
                suite for suite in self.suites
                if suite.cafe_uuid == dic["cafe_uuid"]
            ][0]
            result_parser = SummarizeResults(result_dict=vars(result),
                                             tests=tests,
                                             execution_time=run_time,
                                             datagen_time=datagen_time)
            all_results += result_parser.gather_results()
            summary = result_parser.summary_result()
            for key in result_dict:
                result_dict[key] += summary[key]

            if result.stream.buf.strip():
                # this line can be replaced to add an extensible stdout/err log
                sys.stderr.write("{0}\n\n".format(result.stream.buf.strip()))

        if self.cl_args.result is not None:
            reporter = Reporter(result_parser, all_results)
            reporter.generate_report(self.cl_args.result,
                                     self.cl_args.result_directory)
        return self.print_results(run_time=run_time,
                                  datagen_time=datagen_time,
                                  **result_dict)

    def print_results(self, tests, errors, failures, skipped, run_time,
                      datagen_time):
        """Prints results summerized in compile_results messages"""
        print("{0}".format("-" * 70))
        print("Ran {0} test{1} in {2:.3f}s".format(tests,
                                                   "s" * bool(tests - 1),
                                                   run_time))
        print("Generated datasets in {0:.3f}s".format(datagen_time))
        print("Total runtime {0:.3f}s".format(run_time + datagen_time))

        results = []
        if failures:
            results.append("failures={0}".format(failures))
        if skipped:
            results.append("skipped={0}".format(skipped))
        if errors:
            results.append("errors={0}".format(errors))

        status = "FAILED" if failures or errors else "PASSED"
        print("\n{} ".format(status), end="")
        if results:
            print("({})".format(", ".join(results)))
        print("{0}\nDetailed logs: {1}\n{2}".format("=" * 150,
                                                    self.test_env.test_log_dir,
                                                    "-" * 150))
        return tests, errors, failures
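
The run() method above fans suites out to worker processes through a pair of
multiprocessing queues and shuts each worker down with a None sentinel. Below
is a minimal, self-contained sketch of that fan-out/fan-in pattern; the worker
body and the suites list are placeholders rather than OpenCafe code.

from multiprocessing import Process, Queue


def worker(to_worker, from_worker):
    # Pull items until the None sentinel arrives, then stop
    while True:
        item = to_worker.get()
        if item is None:
            break
        from_worker.put("ran {0}".format(item))


if __name__ == "__main__":
    to_worker, from_worker = Queue(), Queue()
    suites = ["suite_a", "suite_b", "suite_c"]
    workers = 2

    for suite in suites:
        to_worker.put(suite)
    # One sentinel per worker tells each process the queue is exhausted
    for _ in range(workers):
        to_worker.put(None)

    procs = [Process(target=worker, args=(to_worker, from_worker))
             for _ in range(workers)]
    for proc in procs:
        proc.start()

    # Collect exactly one result per suite before joining the workers
    results = [from_worker.get() for _ in suites]
    for proc in procs:
        proc.join()
    print(results)
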
Example #25
0
class UnittestRunner(object):

    def __init__(self):
        self.cl_args = _UnittestRunnerCLI().get_cl_args()
        self.test_env = TestEnvManager(
            self.cl_args.product, self.cl_args.config,
            test_repo_package_name=self.cl_args.test_repo)

        # If something in the cl_args is supposed to override a default,
        # such as the data directory, it needs to happen before
        # finalize() is called
        self.test_env.test_data_directory = (
            self.test_env.test_data_directory or self.cl_args.data_directory)
        self.test_env.finalize()
        init_root_log_handler()
        self.product = self.cl_args.product
        self.print_mug_and_paths(self.test_env)

    @staticmethod
    def print_mug_and_paths(test_env):
        print """
    ( (
     ) )
  .........
  |       |___
  |       |_  |
  |  :-)  |_| |
  |       |___|
  |_______|
=== CAFE Runner ==="""
        print "=" * 150
        print "Percolated Configuration"
        print "-" * 150
        print "BREWING FROM: ....: {0}".format(test_env.test_repo_path)
        print "ENGINE CONFIG FILE: {0}".format(test_env.engine_config_path)
        print "TEST CONFIG FILE..: {0}".format(test_env.test_config_file_path)
        print "DATA DIRECTORY....: {0}".format(test_env.test_data_directory)
        print "LOG PATH..........: {0}".format(test_env.test_log_dir)
        print "=" * 150

    @staticmethod
    def execute_test(runner, test_id, test, results):
        result = runner.run(test)
        results.update({test_id: result})

    @staticmethod
    def get_runner(cl_args):
        test_runner = None

        # Use the parallel text runner so the console logs look correct
        if cl_args.parallel:
            test_runner = OpenCafeParallelTextTestRunner(
                verbosity=cl_args.verbose)
        else:
            test_runner = unittest.TextTestRunner(verbosity=cl_args.verbose)

        test_runner.failfast = cl_args.fail_fast
        return test_runner

    @staticmethod
    def dump_results(start, finish, results):
        print "-" * 71

        tests_run = 0
        errors = 0
        failures = 0
        for key, result in results.items():
            tests_run += result.testsRun
            errors += len(result.errors)
            failures += len(result.failures)

        print "Ran {0} test{1} in {2:.3f}s".format(
            tests_run, "s" if tests_run != 1 else "", finish - start)

        if failures or errors:
            print "\nFAILED ({0}{1}{2})".format(
                "Failures={0}".format(failures) if failures else "",
                " " if failures and errors else "",
                "Errors={0}".format(errors) if errors else "")

        return errors, failures, tests_run

    def run(self):
        """
        Loops through all the packages, modules, and methods passed in from
        the command line and runs them.
        """
        master_suite = OpenCafeUnittestTestSuite()
        parallel_test_list = []

        builder = SuiteBuilder(self.cl_args, self.test_env.test_repo_package)
        test_runner = self.get_runner(self.cl_args)

        if self.cl_args.parallel:
            parallel_test_list = builder.generate_suite_list()
            if self.cl_args.dry_run:
                for suite in parallel_test_list:
                    for test in suite:
                        print(test)
                exit(0)
            exit_code = self.run_parallel(
                parallel_test_list, test_runner,
                result_type=self.cl_args.result,
                results_path=self.cl_args.result_directory)
            exit(exit_code)
        else:
            master_suite = builder.generate_suite()
            if self.cl_args.dry_run:
                for test in master_suite:
                    print(test)
                exit(0)
            exit_code = self.run_serialized(
                master_suite, test_runner, result_type=self.cl_args.result,
                results_path=self.cl_args.result_directory)

            exit(exit_code)

    def run_parallel(
            self, test_suites, test_runner, result_type=None,
            results_path=None):

        exit_code = 0
        proc = None
        unittest.installHandler()
        processes = []
        manager = Manager()
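        # Shared dict proxy lets each worker process report its result back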
        results = manager.dict()
        start = time.time()

        test_mapping = {}
        for test_suite in test_suites:
            # Give each test suite a UUID so it can be
            # matched to the correct test result
            test_id = str(uuid.uuid4())
            test_mapping[test_id] = test_suite

            proc = Process(
                target=self.execute_test,
                args=(test_runner, test_id, test_suite, results))
            processes.append(proc)
            proc.start()

        for proc in processes:
            proc.join()

        finish = time.time()

        errors, failures, _ = self.dump_results(start, finish, results)

        if result_type is not None:
            all_results = []
            for test_id, result in results.items():
                tests = test_mapping[test_id]
                result_parser = SummarizeResults(
                    vars(result), tests, (finish - start))
                all_results += result_parser.gather_results()

            reporter = Reporter(
                result_parser=result_parser, all_results=all_results)
            reporter.generate_report(
                result_type=result_type, path=results_path)

        if failures or errors:
            exit_code = 1

        return exit_code

    def run_serialized(
            self, master_suite, test_runner, result_type=None,
            results_path=None):

        exit_code = 0
        unittest.installHandler()
        start_time = time.time()
        result = test_runner.run(master_suite)
        total_execution_time = time.time() - start_time

        if result_type is not None:
            result_parser = SummarizeResults(
                vars(result), master_suite, total_execution_time)
            all_results = result_parser.gather_results()
            reporter = Reporter(
                result_parser=result_parser, all_results=all_results)
            reporter.generate_report(
                result_type=result_type, path=results_path)

        log_results(result)
        if not result.wasSuccessful():
            exit_code = 1

        return exit_code
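
run_parallel() above tags every suite with a UUID and has each child process
write its result into a Manager-backed dict, so results can be matched back to
the suites that produced them. The following is a minimal sketch of that
mapping pattern, with placeholder suites and results standing in for the real
runner:

import uuid
from multiprocessing import Manager, Process


def execute(test_id, results):
    # Placeholder for runner.run(test); record a fake outcome keyed by UUID
    results[test_id] = "passed"


if __name__ == "__main__":
    suites = ["suite_a", "suite_b"]   # stand-ins for real test suites
    manager = Manager()
    results = manager.dict()          # shared between parent and children
    mapping = {}
    procs = []

    for suite in suites:
        test_id = str(uuid.uuid4())   # lets results be matched to suites
        mapping[test_id] = suite
        proc = Process(target=execute, args=(test_id, results))
        procs.append(proc)
        proc.start()

    for proc in procs:
        proc.join()

    for test_id, outcome in results.items():
        print(mapping[test_id], outcome)
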
Example #26
0
class BrewRunner(UnittestRunner):
    """OpenCafe BrewFile Runner"""
    def __init__(self):
        self.print_mug()
        self.cl_args = ArgumentParser().parse_args()
        self.test_env = TestEnvManager(
            "", self.cl_args.config, test_repo_package_name="")
        self.test_env.test_data_directory = self.test_env.test_data_directory
        self.test_env.finalize()
        cclogging.init_root_log_handler()

        # This is where things diverge from the regular parallel runner
        # Extract the runfile contents
        self._log = cclogging.getLogger(
            cclogging.get_object_namespace(self.__class__))
        self.datagen_start = time.time()
        self.run_file = BrewFile(self.cl_args.runfiles)

        # Log the runfile here so that it appears in the logs before any tests
        self._log.debug("\n" + str(self.run_file))

        # TODO: Once the parallel_runner is changed to a yielding model,
        #       change this to yielding brews instead of generating a list
        self.suites = SuiteBuilder(
            testrepos=self.run_file.brew_modules(),
            dry_run=self.cl_args.dry_run,
            exit_on_error=True).get_suites()

        self.print_configuration(self.test_env, brewfile=self.run_file)

    def print_configuration(self, test_env, repos=None, brewfile=None):
        """Prints the config/logs/repo/data_directory/brewfiles"""
        print("=" * 150)
        print("Percolated Configuration")
        print("-" * 150)
        if brewfile:
            print("BREW FILES........:")
            print("\t\t" + "\n\t\t    ".join(brewfile.files))
            if self.cl_args.verbose >= 2:
                print("BREWS............:")
                print "\t" + "\n\t".join(brewfile.brews_to_strings())
        if repos:
            print("BREWING FROM: ....: {0}".format(repos[0]))
            for repo in repos[1:]:
                print("{0}{1}".format(" " * 20, repo))
        print("ENGINE CONFIG FILE: {0}".format(test_env.engine_config_path))
        print("TEST CONFIG FILE..: {0}".format(test_env.test_config_file_path))
        print("DATA DIRECTORY....: {0}".format(test_env.test_data_directory))
        print("LOG PATH..........: {0}".format(test_env.test_log_dir))
        print("=" * 150)

    @staticmethod
    def print_mug():
        """Prints the cafe 'mug'"""
        print("""
        /~~~~~~~~~~~~~~~~~~~~~~~/|
       /              /######/ / |
      /              /______/ /  |
     ========================= /||
     |_______________________|/ ||
      |  \****/     \__,,__/    ||
      |===\**/       __,,__     ||
      |______________\====/%____||
      |   ___        /~~~~\ %  / |
     _|  |===|===   /      \%_/  |
    | |  |###|     |########| | /
    |____\###/______\######/__|/
    ~~~~~~~~~~~~~~~~~~~~~~~~~~
===      CAFE Brewfile Runner      ===""")
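
The BrewRunner above initializes a root log handler and then fetches a logger
scoped to the class's namespace via cclogging. A rough standard-library
analogue of that pattern is sketched below; it is an illustration only, not
the cclogging implementation.

import logging


def get_object_namespace(cls):
    # Rough analogue of cclogging.get_object_namespace: module plus class name
    return "{0}.{1}".format(cls.__module__, cls.__name__)


class ExampleRunner(object):

    def __init__(self):
        # Root handler configured once, then a class-scoped logger is fetched
        logging.basicConfig(level=logging.DEBUG)
        self._log = logging.getLogger(get_object_namespace(self.__class__))
        self._log.debug("runner initialized")


if __name__ == "__main__":
    ExampleRunner()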