Code Example #1
File: runner.py Project: wooyunyang/syntribos
    @classmethod
    def run_given_tests(cls, list_of_tests, file_path, req_str,
                        meta_vars=None):
        """Loads all the templates and runs all the given tests

        This method calls run_test method to run each of the tests one
        by one.

        :param list list_of_tests: A list of all the loaded tests
        :param str file_path: Path of the template file
        :param str req_str: Request string of each template

        :return: None
        """
        pool = ThreadPool(CONF.syntribos.threads)
        try:
            template_start_time = time.time()
            failures = 0
            errors = 0
            print("\n  ID \t\tTest Name      \t\t\t\t\t\t    Progress")
            for test_name, test_class in list_of_tests:
                test_class.test_id = cls.current_test_id
                cls.current_test_id += 5

                result_string = "[{test_id}]  :  {name}".format(
                    test_id=cli.colorize(
                        test_class.test_id, color="green"),
                    name=test_name.replace("_", " ").capitalize())
                if CONF.no_colorize:
                    result_string = result_string.ljust(55)
                else:
                    result_string = result_string.ljust(60)
                try:
                    test_class.create_init_request(file_path, req_str,
                                                   meta_vars)
                except Exception:
                    print(_(
                        "Error in parsing template:\n %s\n"
                    ) % traceback.format_exc())
                    LOG.error("Error in parsing template:")
                    break
                test_cases = list(
                    test_class.get_test_cases(file_path, req_str))
                total_tests = len(test_cases)
                if total_tests > 0:
                    log_string = "[{test_id}]  :  {name}".format(
                        test_id=test_class.test_id, name=test_name)
                    LOG.debug(log_string)
                    last_failures = result.stats['unique_failures']
                    last_errors = result.stats['errors']
                    p_bar = cli.ProgressBar(
                        message=result_string, total_len=total_tests)
                    test_class.send_init_request(file_path, req_str, meta_vars)

                    # This line runs the tests
                    pool.map(lambda t: cls.run_test(t, p_bar), test_cases)

                    failures = result.stats['unique_failures'] - last_failures
                    errors = result.stats['errors'] - last_errors
                    failures_str = cli.colorize_by_percent(
                        failures, total_tests, "red")

                    if errors:
                        errors_str = cli.colorize(errors, "red")
                        print(_(
                            "  :  %(fail)s Failure(s), %(err)s Error(s)\r") % {
                                "fail": failures_str, "err": errors_str})
                    else:
                        print(_(
                            "  : %s Failure(s), 0 Error(s)\r") % failures_str)

            run_time = time.time() - template_start_time
            LOG.info(_("Run time: %s sec."), run_time)
            if hasattr(result, "testsRun"):
                num_tests = result.testsRun - result.testsRunSinceLastPrint
                print(_("\nRan %(num)s test(s) in %(time).3f s\n") %
                      {"num": num_tests, "time": run_time})
                result.testsRunSinceLastPrint = result.testsRun

        except KeyboardInterrupt:
            print(_(
                '\n\nPausing...Hit ENTER to continue, type quit to exit.'))
            try:
                response = input()
                if response.lower() == "quit":
                    result.print_result(cls.start_time)
                    cleanup.delete_temps()
                    print(_("Exiting..."))
                    pool.close()
                    pool.join()
                    exit(0)
                print(_('Resuming...'))
            except KeyboardInterrupt:
                result.print_result(cls.start_time)
                cleanup.delete_temps()
                print(_("Exiting..."))
                pool.close()
                pool.join()
                exit(0)
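
This newer revision fans the test cases for each template out over a thread pool: pool.map applies cls.run_test to every case while a shared progress bar is advanced from the workers, and the pool is closed and joined on exit. Below is a minimal, standalone sketch of that fan-out pattern, assuming a made-up fake_case workload and a lock-guarded counter in place of syntribos' cli.ProgressBar and shared result object.

    # Minimal sketch of the pool.map fan-out above.  fake_case and the
    # lock-guarded counter are stand-ins for a syntribos test case and
    # for cli.ProgressBar / result.stats; only the threading shape matches.
    import threading
    import time
    from multiprocessing.dummy import Pool as ThreadPool  # thread-backed Pool

    progress_lock = threading.Lock()
    progress = {"done": 0}

    def fake_case(delay):
        """Pretend to send one fuzzed request."""
        time.sleep(delay)

    def run_case(delay):
        fake_case(delay)
        with progress_lock:        # progress updates must be serialized
            progress["done"] += 1

    if __name__ == "__main__":
        cases = [0.01] * 20                  # twenty dummy test cases
        pool = ThreadPool(4)                 # CONF.syntribos.threads supplies this above
        pool.map(run_case, cases)            # blocks until every case has run
        pool.close()
        pool.join()
        print("completed: %d" % progress["done"])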
Code Example #2
File: runner.py Project: wooyunyang/syntribos
    @classmethod
    def run(cls):
        """Method sets up logger and decides on Syntribos control flow

        This is the method where control flow of Syntribos is decided
        based on the commands entered. Depending upon commands such
        as ```list_tests``` or ```run``` the respective method is called.
        """
        global result

        cli.print_symbol()

        # If we are initializing, don't look for a default config file
        if "init" in sys.argv:
            cls.setup_config()
        else:
            cls.setup_config(use_file=True)
        try:
            if CONF.sub_command.name == "init":
                ENV.initialize_syntribos_env()
                exit(0)

            elif CONF.sub_command.name == "list_tests":
                cls.list_tests()
                exit(0)

            elif CONF.sub_command.name == "download":
                ENV.download_wrapper()
                exit(0)
        except AttributeError:
            print(
                _(
                    "Not able to run the requested sub command, please check "
                    "the debug logs for more information, exiting..."))
            exit(1)

        if not ENV.is_syntribos_initialized():
            print(_("Syntribos was not initialized. Please run the 'init'"
                    " command or set it up manually. See the README for"
                    " more information about the installation process."))
            exit(1)

        cls.setup_runtime_env()

        decorator = unittest.runner._WritelnDecorator(cls.output)
        result = syntribos.result.IssueTestResult(decorator, True, verbosity=1)

        cls.start_time = time.time()
        if CONF.sub_command.name == "run":
            list_of_tests = list(
                cls.get_tests(CONF.test_types, CONF.excluded_types))
        elif CONF.sub_command.name == "dry_run":
            dry_run_output = {"failures": [], "successes": []}
            list_of_tests = list(cls.get_tests(dry_run=True))

        print(_("\nRunning Tests...:"))
        templates_dir = CONF.syntribos.templates
        if templates_dir is None:
            print(_("Attempting to download templates from {}").format(
                CONF.remote.templates_uri))
            templates_path = remotes.get(CONF.remote.templates_uri)
            try:
                templates_dir = ContentType("r", 0)(templates_path)
            except IOError:
                print(_("Not able to open `%s`; please verify path, "
                        "exiting...") % templates_path)
                exit(1)

        print(_("\nPress Ctrl-C to pause or exit...\n"))
        meta_vars = None
        templates_dir = list(templates_dir)
        cls.meta_dir_dict = {}
        for file_path, file_content in templates_dir:
            if os.path.basename(file_path) == "meta.json":
                meta_path = os.path.dirname(file_path)
                try:
                    cls.meta_dir_dict[meta_path] = json.loads(file_content)
                except Exception:
                    print("Unable to parse %s, skipping..." % file_path)
        for file_path, req_str in templates_dir:
            if "meta.json" in file_path:
                continue
            meta_vars = cls.get_meta_vars(file_path)
            LOG = cls.get_logger(file_path)
            CONF.log_opt_values(LOG, logging.DEBUG)
            if not file_path.endswith(".template"):
                LOG.warning('file.....:%s (SKIPPED - not a .template file)',
                            file_path)
                continue

            test_names = [t for (t, i) in list_of_tests]  # noqa
            log_string = ''.join([
                '\n{0}\nTEMPLATE FILE\n{0}\n'.format('-' * 12),
                'file.......: {0}\n'.format(file_path),
                'tests......: {0}\n'.format(test_names)
            ])
            LOG.debug(log_string)
            print(syntribos.SEP)
            print("Template File...: {}".format(file_path))
            print(syntribos.SEP)

            if CONF.sub_command.name == "run":
                cls.run_given_tests(list_of_tests, file_path,
                                    req_str, meta_vars)
            elif CONF.sub_command.name == "dry_run":
                cls.dry_run(list_of_tests, file_path,
                            req_str, dry_run_output, meta_vars)

        if CONF.sub_command.name == "run":
            result.print_result(cls.start_time)
            cleanup.delete_temps()
        elif CONF.sub_command.name == "dry_run":
            cls.dry_run_report(dry_run_output)
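
Before walking the templates, run() makes a first pass that indexes every meta.json it encounters into cls.meta_dir_dict, keyed by the directory that contains the file, so that get_meta_vars can later look up the meta variables that apply to a given template path. A rough standalone sketch of that indexing step follows; build_meta_index and the in-memory sample data are hypothetical, whereas syntribos iterates real (file_path, file_content) pairs from the templates directory.

    # Sketch of the meta.json indexing pass in run().  build_meta_index and
    # the sample data are made up; only the keying-by-directory idea matches.
    import json
    import os

    def build_meta_index(templates):
        """Map each directory that holds a meta.json to its parsed contents."""
        meta_dir_dict = {}
        for file_path, file_content in templates:
            if os.path.basename(file_path) == "meta.json":
                try:
                    meta_dir_dict[os.path.dirname(file_path)] = json.loads(file_content)
                except ValueError:
                    print("Unable to parse %s, skipping..." % file_path)
        return meta_dir_dict

    templates = [
        ("api/meta.json", '{"base_url": "http://localhost:8080"}'),
        ("api/users.template", "GET /users HTTP/1.1"),
    ]
    print(build_meta_index(templates))
    # {'api': {'base_url': 'http://localhost:8080'}}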
Code Example #3
    @classmethod
    def run_given_tests(cls, list_of_tests, file_path, req_str):
        """Loads all the templates and runs all the given tests

        This method calls run_test method to run each of the tests one
        by one.

        :param list list_of_tests: A list of all the loaded tests
        :param str file_path: Path of the template file
        :param str req_str: Request string of each template

        :return: None
        """
        try:
            template_start_time = time.time()
            failures = 0
            errors = 0
            print("\n  ID \t\tTest Name      \t\t\t\t\t\t    Progress")
            for test_name, test_class in list_of_tests:
                test_class.test_id = cls.current_test_id
                cls.current_test_id += 5
                log_string = "[{test_id}]  :  {name}".format(
                    test_id=test_class.test_id, name=test_name)
                result_string = "[{test_id}]  :  {name}".format(
                    test_id=cli.colorize(test_class.test_id, color="green"),
                    name=test_name.replace("_", " ").capitalize())
                if not CONF.colorize:
                    result_string = result_string.ljust(55)
                else:
                    result_string = result_string.ljust(60)
                LOG.debug(log_string)
                test_class.send_init_request(file_path, req_str)
                test_cases = list(test_class.get_test_cases(
                    file_path, req_str))
                if len(test_cases) > 0:
                    bar = cli.ProgressBar(message=result_string,
                                          total_len=len(test_cases))
                    last_failures = result.stats["failures"]
                    last_errors = result.stats["errors"]
                    for test in test_cases:
                        if test:
                            cls.run_test(test, result)
                            bar.increment(1)
                        bar.print_bar()
                        failures = result.stats["failures"] - last_failures
                        errors = result.stats["errors"] - last_errors
                        total_tests = len(test_cases)
                        if failures > total_tests * 0.90:
                            # More than 90 percent failure
                            failures = cli.colorize(failures, "red")
                        elif failures > total_tests * 0.45:
                            # More than 45 percent failure
                            failures = cli.colorize(failures, "yellow")
                        elif failures > total_tests * 0.15:
                            # More than 15 percent failure
                            failures = cli.colorize(failures, "blue")
                    if errors:
                        last_failures = result.stats["failures"]
                        last_errors = result.stats["errors"]
                        errors = cli.colorize(errors, "red")
                        print("  :  {0} Failure(s), {1} Error(s)\r".format(
                            failures, errors))
                    else:
                        last_failures = result.stats["failures"]
                        print("  :  {} Failure(s), 0 Error(s)\r".format(
                            failures))

            run_time = time.time() - template_start_time
            LOG.debug("Run time: %s sec.", run_time)
            if hasattr(result, "testsRun"):
                num_tests = result.testsRun - result.testsRunSinceLastPrint
                print("\nRan {num} test(s) in {time:.3f}s\n".format(
                    num=num_tests, time=run_time))
                result.testsRunSinceLastPrint = result.testsRun

        except KeyboardInterrupt:
            print('\n\nPausing...  Hit ENTER to continue, type quit to exit.')
            try:
                response = input()
                if response.lower() == "quit":
                    result.print_result(cls.start_time)
                    cleanup.delete_temps()
                    print("Exiting...")
                    exit(0)
                print('Resuming...')
            except KeyboardInterrupt:
                result.print_result(cls.start_time)
                cleanup.delete_temps()
                print("Exiting...")
                exit(0)
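
This older, single-threaded variant colors the running failure count inline by comparing it against fixed fractions of the total case count: red above 90 percent, yellow above 45 percent, blue above 15 percent. The helper below is a standalone sketch of that threshold logic; the name color_for_failures is invented here, and later revisions appear to fold the same idea into cli.colorize_by_percent.

    # Hypothetical helper mirroring the inline thresholds above; it only
    # picks a color name, whereas cli.colorize wraps the value in ANSI codes.
    def color_for_failures(failures, total_tests):
        """Return a color name based on the failure ratio."""
        if total_tests <= 0:
            return None
        ratio = float(failures) / total_tests
        if ratio > 0.90:
            return "red"        # more than 90 percent failed
        elif ratio > 0.45:
            return "yellow"     # more than 45 percent failed
        elif ratio > 0.15:
            return "blue"       # more than 15 percent failed
        return None             # low failure rate: leave uncolored

    print(color_for_failures(19, 20))   # red
    print(color_for_failures(10, 20))   # yellow
    print(color_for_failures(1, 20))    # None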
Code Example #4
File: runner.py Project: knangia/syntribos
    @classmethod
    def run_given_tests(cls, list_of_tests, file_path, req_str,
                        meta_vars=None):
        """Loads all the templates and runs all the given tests

        This method calls run_test method to run each of the tests one
        by one.

        :param list list_of_tests: A list of all the loaded tests
        :param str file_path: Path of the template file
        :param str req_str: Request string of each template

        :return: None
        """
        try:
            template_start_time = time.time()
            failures = 0
            errors = 0
            print("\n  ID \t\tTest Name      \t\t\t\t\t\t    Progress")
            for test_name, test_class in list_of_tests:
                test_class.test_id = cls.current_test_id
                cls.current_test_id += 5
                log_string = "[{test_id}]  :  {name}".format(
                    test_id=test_class.test_id, name=test_name)
                result_string = "[{test_id}]  :  {name}".format(
                    test_id=cli.colorize(
                        test_class.test_id, color="green"),
                    name=test_name.replace("_", " ").capitalize())
                if not CONF.colorize:
                    result_string = result_string.ljust(55)
                else:
                    result_string = result_string.ljust(60)
                LOG.debug(log_string)
                try:
                    test_class.send_init_request(file_path, req_str, meta_vars)
                except Exception:
                    print(_(
                        "Error in parsing template:\n %s\n"
                    ) % traceback.format_exc())
                    LOG.error(_LE("Error in parsing template:"))
                    break
                test_cases = list(
                    test_class.get_test_cases(file_path, req_str))
                if len(test_cases) > 0:
                    p_bar = cli.ProgressBar(
                        message=result_string, total_len=len(test_cases))
                    last_failures = result.stats["failures"]
                    last_errors = result.stats["errors"]
                    for test in test_cases:
                        if test:
                            cls.run_test(test, result)
                            p_bar.increment(1)
                        p_bar.print_bar()
                        failures = result.stats["failures"] - last_failures
                        errors = result.stats["errors"] - last_errors
                        total_tests = len(test_cases)
                        if failures > total_tests * 0.90:
                            # More than 90 percent failure
                            failures = cli.colorize(failures, "red")
                        elif failures > total_tests * 0.45:
                            # More than 45 percent failure
                            failures = cli.colorize(failures, "yellow")
                        elif failures > total_tests * 0.15:
                            # More than 15 percent failure
                            failures = cli.colorize(failures, "blue")
                    if errors:
                        last_failures = result.stats["failures"]
                        last_errors = result.stats["errors"]
                        errors = cli.colorize(errors, "red")
                        print(_(
                            "  :  %(fail)s Failure(s), %(err)s Error(s)\r") % {
                                "fail": failures, "err": errors})
                    else:
                        last_failures = result.stats["failures"]
                        print(
                            _(
                                "  : %s Failure(s), 0 Error(s)\r") % failures)

            run_time = time.time() - template_start_time
            LOG.info(_("Run time: %s sec."), run_time)
            if hasattr(result, "testsRun"):
                num_tests = result.testsRun - result.testsRunSinceLastPrint
                print(_("\nRan %(num)s test(s) in %(time).3f s\n") %
                      {"num": num_tests, "time": run_time})
                result.testsRunSinceLastPrint = result.testsRun

        except KeyboardInterrupt:
            print(_(
                '\n\nPausing...Hit ENTER to continue, type quit to exit.'))
            try:
                response = input()
                if response.lower() == "quit":
                    result.print_result(cls.start_time)
                    cleanup.delete_temps()
                    print(_("Exiting..."))
                    exit(0)
                print(_('Resuming...'))
            except KeyboardInterrupt:
                result.print_result(cls.start_time)
                cleanup.delete_temps()
                print(_("Exiting..."))
                exit(0)
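
Relative to example #3, this revision passes meta_vars into send_init_request and wraps the call in a guard: if the template cannot be parsed, the full traceback is printed, an error is logged, and the loop breaks for that file instead of aborting the whole run. A tiny standalone sketch of that guard; parse_template here is hypothetical and stands in for the real call.

    # Sketch of the parse-and-bail guard above; parse_template is made up.
    import traceback

    def parse_template(req_str):
        if not req_str.startswith(("GET", "POST", "PUT", "DELETE")):
            raise ValueError("malformed request line")

    for req_str in ["GET /good HTTP/1.1", "not-a-request"]:
        try:
            parse_template(req_str)
        except Exception:
            print("Error in parsing template:\n %s\n" % traceback.format_exc())
            break   # skip the remaining tests for this template file
        print("parsed: %s" % req_str)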
Code Example #5
File: runner.py Project: knangia/syntribos
    @classmethod
    def run(cls):
        """Method sets up logger and decides on Syntribos control flow

        This is the method where control flow of Syntribos is decided
        based on the commands entered. Depending upon commands such
        as ```list_tests``` or ```run``` the respective method is called.
        """
        global result

        cli.print_symbol()

        # If we are initializing, don't look for a default config file
        if "init" in sys.argv:
            cls.setup_config()
        else:
            cls.setup_config(use_file=True)
        try:
            if CONF.sub_command.name == "init":
                ENV.initialize_syntribos_env()
                exit(0)

            elif CONF.sub_command.name == "list_tests":
                cls.list_tests()
                exit(0)

            elif CONF.sub_command.name == "download":
                ENV.download_wrapper()
                exit(0)
        except AttributeError:
            print(
                _(
                    "Not able to run the requested sub command, please check "
                    "the debug logs for more information, exiting..."))
            exit(1)

        if not ENV.is_syntribos_initialized():
            print(_("Syntribos was not initialized. Please run the 'init'"
                    " command or set it up manually. See the README for"
                    " more information about the installation process."))
            exit(1)

        cls.setup_runtime_env()

        decorator = unittest.runner._WritelnDecorator(cls.output)
        result = syntribos.result.IssueTestResult(decorator, True, verbosity=1)

        cls.start_time = time.time()
        if CONF.sub_command.name == "run":
            list_of_tests = list(
                cls.get_tests(CONF.test_types, CONF.excluded_types))
        elif CONF.sub_command.name == "dry_run":
            dry_run_output = {"failures": [], "successes": []}
            list_of_tests = list(cls.get_tests(dry_run=True))

        print(_("\nRunning Tests...:"))
        templates_dir = CONF.syntribos.templates
        if templates_dir is None:
            print(_("Attempting to download templates from {}").format(
                CONF.remote.templates_uri))
            templates_path = remotes.get(CONF.remote.templates_uri)
            try:
                templates_dir = ContentType("r", 0)(templates_path)
            except IOError:
                print(_("Not able to open `%s`; please verify path, "
                        "exiting...") % templates_path)
                exit(1)

        print(_("\nPress Ctrl-C to pause or exit...\n"))
        meta_vars = None
        templates_dir = list(templates_dir)
        cls.meta_dir_dict = {}
        for file_path, file_content in templates_dir:
            if os.path.basename(file_path) == "meta.json":
                meta_path = os.path.dirname(file_path)
                try:
                    cls.meta_dir_dict[meta_path] = json.loads(file_content)
                except Exception:
                    print("Unable to parse %s, skipping..." % file_path)

        for file_path, req_str in templates_dir:
            if "meta.json" in file_path:
                continue
            meta_vars = cls.get_meta_vars(file_path)
            LOG = cls.get_logger(file_path)
            CONF.log_opt_values(LOG, logging.DEBUG)
            if not file_path.endswith(".template"):
                LOG.warning(
                    _LW('file.....:%s (SKIPPED - not a .template file)'),
                    file_path)
                continue

            test_names = [t for (t, i) in list_of_tests]  # noqa
            log_string = ''.join([
                '\n{0}\nTEMPLATE FILE\n{0}\n'.format('-' * 12),
                'file.......: {0}\n'.format(file_path),
                'tests......: {0}\n'.format(test_names)
            ])
            LOG.debug(log_string)
            print(syntribos.SEP)
            print("Template File...: {}".format(file_path))
            print(syntribos.SEP)

            if CONF.sub_command.name == "run":
                cls.run_given_tests(list_of_tests, file_path,
                                    req_str, meta_vars)
            elif CONF.sub_command.name == "dry_run":
                cls.dry_run(list_of_tests, file_path,
                            req_str, dry_run_output, meta_vars)

        if CONF.sub_command.name == "run":
            result.print_result(cls.start_time)
            cleanup.delete_temps()
        elif CONF.sub_command.name == "dry_run":
            cls.dry_run_report(dry_run_output)
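
The dispatch at the top of run() keys off CONF.sub_command.name, which appears to come from the project's config layer's sub-command support: init, list_tests, and download short-circuit with exit(0), while run and dry_run fall through to the template loop. The snippet below is a rough argparse-based analogue of that shape, not syntribos' actual option wiring.

    # Rough analogue of the sub-command dispatch in run(); syntribos itself
    # registers these sub-commands through its config layer, not argparse.
    import argparse
    import sys

    def main(argv):
        parser = argparse.ArgumentParser(prog="syntribos-like")
        sub = parser.add_subparsers(dest="sub_command")
        for name in ("init", "list_tests", "download", "run", "dry_run"):
            sub.add_parser(name)
        args = parser.parse_args(argv)

        if args.sub_command == "init":
            print("initializing environment")
            return 0
        elif args.sub_command == "list_tests":
            print("listing available tests")
            return 0
        elif args.sub_command == "download":
            print("downloading remote payloads/templates")
            return 0
        # "run" and "dry_run" fall through to the template loop
        print("running templates (%s)" % args.sub_command)
        return 0

    if __name__ == "__main__":
        sys.exit(main(sys.argv[1:]))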
Code Example #6
File: runner.py Project: rahulunair/syntribos
    @classmethod
    def run_given_tests(cls, list_of_tests, file_path, req_str,
                        meta_vars=None):
        """Loads all the templates and runs all the given tests

        This method calls run_test method to run each of the tests one
        by one.

        :param list list_of_tests: A list of all the loaded tests
        :param str file_path: Path of the template file
        :param str req_str: Request string of each template

        :return: None
        """
        pool = ThreadPool(CONF.syntribos.threads)
        try:
            template_start_time = time.time()
            failures = 0
            errors = 0
            print("\n  ID \t\tTest Name      \t\t\t\t\t\t    Progress")
            for test_name, test_class in list_of_tests:
                test_class.test_id = cls.current_test_id
                cls.current_test_id += 5

                result_string = "[{test_id}]  :  {name}".format(
                    test_id=cli.colorize(
                        test_class.test_id, color="green"),
                    name=test_name.replace("_", " ").capitalize())
                if not CONF.colorize:
                    result_string = result_string.ljust(55)
                else:
                    result_string = result_string.ljust(60)
                try:
                    test_class.create_init_request(file_path, req_str,
                                                   meta_vars)
                except Exception:
                    print(_(
                        "Error in parsing template:\n %s\n"
                    ) % traceback.format_exc())
                    LOG.error("Error in parsing template:")
                    break
                test_cases = list(
                    test_class.get_test_cases(file_path, req_str, meta_vars))
                total_tests = len(test_cases)
                if total_tests > 0:
                    log_string = "[{test_id}]  :  {name}".format(
                        test_id=test_class.test_id, name=test_name)
                    LOG.debug(log_string)
                    last_failures = result.stats['unique_failures']
                    last_errors = result.stats['errors']
                    p_bar = cli.ProgressBar(
                        message=result_string, total_len=total_tests)
                    test_class.send_init_request(file_path, req_str, meta_vars)

                    # This line runs the tests
                    pool.map(lambda t: cls.run_test(t, p_bar), test_cases)

                    failures = result.stats['unique_failures'] - last_failures
                    errors = result.stats['errors'] - last_errors
                    failures_str = cli.colorize_by_percent(
                        failures, total_tests)

                    if errors:
                        errors_str = cli.colorize(errors, "red")
                        print(_(
                            "  :  %(fail)s Failure(s), %(err)s Error(s)\r") % {
                                "fail": failures_str, "err": errors_str})
                    else:
                        print(_(
                            "  : %s Failure(s), 0 Error(s)\r") % failures_str)

            run_time = time.time() - template_start_time
            LOG.info(_("Run time: %s sec."), run_time)
            if hasattr(result, "testsRun"):
                num_tests = result.testsRun - result.testsRunSinceLastPrint
                print(_("\nRan %(num)s test(s) in %(time).3f s\n") %
                      {"num": num_tests, "time": run_time})
                result.testsRunSinceLastPrint = result.testsRun

        except KeyboardInterrupt:
            print(_(
                '\n\nPausing...Hit ENTER to continue, type quit to exit.'))
            try:
                response = input()
                if response.lower() == "quit":
                    result.print_result(cls.start_time)
                    cleanup.delete_temps()
                    print(_("Exiting..."))
                    pool.close()
                    pool.join()
                    exit(0)
                print(_('Resuming...'))
            except KeyboardInterrupt:
                result.print_result(cls.start_time)
                cleanup.delete_temps()
                print(_("Exiting..."))
                pool.close()
                pool.join()
                exit(0)
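
All of the variants above share the same Ctrl-C handling: the first interrupt pauses the run and prompts on stdin; typing quit, or pressing Ctrl-C again at the prompt, prints the accumulated results, removes temporary files, and exits, while anything else resumes. A stripped-down sketch of that pattern follows; print_results, delete_temps, and do_work are placeholders for syntribos' result.print_result, cleanup.delete_temps, and the actual test loop.

    # Minimal sketch of the pause/quit/resume handling used above.
    # print_results, delete_temps, and do_work are placeholders.
    import sys
    import time

    def print_results():
        print("...summary of issues found so far...")

    def delete_temps():
        print("...removing temporary files...")

    def do_work():
        while True:
            time.sleep(0.5)      # stand-in for sending fuzzed requests

    def shutdown():
        print_results()
        delete_temps()
        print("Exiting...")
        sys.exit(0)

    try:
        do_work()
    except KeyboardInterrupt:
        print("\n\nPausing...  Hit ENTER to continue, type quit to exit.")
        try:
            if input().lower() == "quit":
                shutdown()
            print("Resuming...")
            do_work()
        except KeyboardInterrupt:
            shutdown()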