Code example #1
    def test_colorize(self):
        CONF.colorize = True
        string = "color this string"
        colors = {"red": 31,
                  "green": 32,
                  "yellow": 33,
                  "blue": 34,
                  "nocolor": 0}
        for color in colors:
            self.assertEqual(
                "\033[0;{clr}m{string}\033[0;m".format(
                    string=string, clr=colors[color]),
                colorize(string, color))
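These tests pin down the output format of the colorize() helper exactly, so its shape can be reconstructed from them. Below is a minimal sketch consistent with the expected strings, assuming colors map to plain ANSI SGR codes and that a no_colorize flag (the style exercised in example #5 below; example #1 toggles a colorize flag instead) disables coloring. The _Conf stand-in is hypothetical and is not syntribos's actual config plumbing:

# Minimal colorize() sketch reconstructed from the test expectations above.
class _Conf(object):
    no_colorize = False  # hypothetical stand-in for the real CONF object

CONF = _Conf()

COLORS = {"red": 31, "green": 32, "yellow": 33, "blue": 34, "nocolor": 0}

def colorize(string, color="nocolor"):
    """Wrap `string` in ANSI SGR escape codes for the named color."""
    if CONF.no_colorize:
        return string  # no-color mode returns the input unchanged
    return "\033[0;{clr}m{string}\033[0;m".format(
        clr=COLORS.get(color, 0), string=string)

print(colorize("color this string", "red"))  # shows red on ANSI terminals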
Code example #2
File: runner.py Project: wooyunyang/syntribos
    def run_given_tests(cls, list_of_tests, file_path, req_str,
                        meta_vars=None):
        """Loads all the templates and runs all the given tests

        This method dispatches each test case to the run_test method
        through a worker thread pool.

        :param list list_of_tests: A list of all the loaded tests
        :param str file_path: Path of the template file
        :param str req_str: Request string of each template
        :param dict meta_vars: Optional dict of template meta variables

        :return: None
        """
        pool = ThreadPool(CONF.syntribos.threads)
        try:
            template_start_time = time.time()
            failures = 0
            errors = 0
            print("\n  ID \t\tTest Name      \t\t\t\t\t\t    Progress")
            for test_name, test_class in list_of_tests:
                test_class.test_id = cls.current_test_id
                cls.current_test_id += 5

                result_string = "[{test_id}]  :  {name}".format(
                    test_id=cli.colorize(
                        test_class.test_id, color="green"),
                    name=test_name.replace("_", " ").capitalize())
                if CONF.no_colorize:
                    result_string = result_string.ljust(55)
                else:
                    result_string = result_string.ljust(60)
                try:
                    test_class.create_init_request(file_path, req_str,
                                                   meta_vars)
                except Exception:
                    print(_(
                        "Error in parsing template:\n %s\n"
                    ) % traceback.format_exc())
                    LOG.error("Error in parsing template:")
                    break
                test_cases = list(
                    test_class.get_test_cases(file_path, req_str))
                total_tests = len(test_cases)
                if total_tests > 0:
                    log_string = "[{test_id}]  :  {name}".format(
                        test_id=test_class.test_id, name=test_name)
                    LOG.debug(log_string)
                    last_failures = result.stats['unique_failures']
                    last_errors = result.stats['errors']
                    p_bar = cli.ProgressBar(
                        message=result_string, total_len=total_tests)
                    test_class.send_init_request(file_path, req_str, meta_vars)

                    # This line runs the tests
                    pool.map(lambda t: cls.run_test(t, p_bar), test_cases)

                    failures = result.stats['unique_failures'] - last_failures
                    errors = result.stats['errors'] - last_errors
                    failures_str = cli.colorize_by_percent(
                        failures, total_tests, "red")

                    if errors:
                        errors_str = cli.colorize(errors, "red")
                        print(_(
                            "  :  %(fail)s Failure(s), %(err)s Error(s)\r") % {
                                "fail": failures_str, "err": errors_str})
                    else:
                        print(_(
                            "  : %s Failure(s), 0 Error(s)\r") % failures_str)

            run_time = time.time() - template_start_time
            LOG.info(_("Run time: %s sec."), run_time)
            if hasattr(result, "testsRun"):
                num_tests = result.testsRun - result.testsRunSinceLastPrint
                print(_("\nRan %(num)s test(s) in %(time).3f s\n") %
                      {"num": num_tests, "time": run_time})
                result.testsRunSinceLastPrint = result.testsRun

        except KeyboardInterrupt:
            print(_(
                '\n\nPausing...Hit ENTER to continue, type quit to exit.'))
            try:
                response = input()
                if response.lower() == "quit":
                    result.print_result(cls.start_time)
                    cleanup.delete_temps()
                    print(_("Exiting..."))
                    pool.close()
                    pool.join()
                    exit(0)
                print(_('Resuming...'))
            except KeyboardInterrupt:
                result.print_result(cls.start_time)
                cleanup.delete_temps()
                print(_("Exiting..."))
                pool.close()
                pool.join()
                exit(0)
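What distinguishes this variant from the serial ones below (examples #3 and #4) is the worker pool: instead of looping over test cases one at a time, it hands them to pool.map, so up to CONF.syntribos.threads cases run concurrently. A self-contained sketch of that dispatch pattern, with a stand-in worker in place of cls.run_test:

from multiprocessing.dummy import ThreadPool  # thread-backed Pool API

def run_test(case):
    # Stand-in for cls.run_test(t, p_bar); the real worker sends a
    # fuzzed request and records the result.
    return case * 2

test_cases = list(range(10))
pool = ThreadPool(4)  # cf. ThreadPool(CONF.syntribos.threads) above
try:
    # map() blocks until every test case has been handled by a worker.
    results = pool.map(run_test, test_cases)
finally:
    pool.close()
    pool.join()
print(results)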
Code example #3
    def run_given_tests(cls, list_of_tests, file_path, req_str):
        """Loads all the templates and runs all the given tests

        This method calls run_test method to run each of the tests one
        by one.

        :param list list_of_tests: A list of all the loaded tests
        :param str file_path: Path of the template file
        :param str req_str: Request string of each template

        :return: None
        """
        try:
            template_start_time = time.time()
            failures = 0
            errors = 0
            print("\n  ID \t\tTest Name      \t\t\t\t\t\t    Progress")
            for test_name, test_class in list_of_tests:
                test_class.test_id = cls.current_test_id
                cls.current_test_id += 5
                log_string = "[{test_id}]  :  {name}".format(
                    test_id=test_class.test_id, name=test_name)
                result_string = "[{test_id}]  :  {name}".format(
                    test_id=cli.colorize(test_class.test_id, color="green"),
                    name=test_name.replace("_", " ").capitalize())
                if not CONF.colorize:
                    result_string = result_string.ljust(55)
                else:
                    result_string = result_string.ljust(60)
                LOG.debug(log_string)
                test_class.send_init_request(file_path, req_str)
                test_cases = list(test_class.get_test_cases(
                    file_path, req_str))
                if len(test_cases) > 0:
                    bar = cli.ProgressBar(message=result_string,
                                          total_len=len(test_cases))
                    last_failures = result.stats["failures"]
                    last_errors = result.stats["errors"]
                    for test in test_cases:
                        if test:
                            cls.run_test(test, result)
                            bar.increment(1)
                        bar.print_bar()
                        failures = result.stats["failures"] - last_failures
                        errors = result.stats["errors"] - last_errors
                        total_tests = len(test_cases)
                        if failures > total_tests * 0.90:
                            # More than 90 percent failure
                            failures = cli.colorize(failures, "red")
                        elif failures > total_tests * 0.45:
                            # More than 45 percent failure
                            failures = cli.colorize(failures, "yellow")
                        elif failures > total_tests * 0.15:
                            # More than 15 percent failure
                            failures = cli.colorize(failures, "blue")
                    if errors:
                        last_failures = result.stats["failures"]
                        last_errors = result.stats["errors"]
                        errors = cli.colorize(errors, "red")
                        print("  :  {0} Failure(s), {1} Error(s)\r".format(
                            failures, errors))
                    else:
                        last_failures = result.stats["failures"]
                        print("  :  {} Failure(s), 0 Error(s)\r".format(
                            failures))

            run_time = time.time() - template_start_time
            LOG.debug("Run time: %s sec.", run_time)
            if hasattr(result, "testsRun"):
                num_tests = result.testsRun - result.testsRunSinceLastPrint
                print("\nRan {num} test(s) in {time:.3f}s\n".format(
                    num=num_tests, time=run_time))
                result.testsRunSinceLastPrint = result.testsRun

        except KeyboardInterrupt:
            print('\n\nPausing...  Hit ENTER to continue, type quit to exit.')
            try:
                response = input()
                if response.lower() == "quit":
                    result.print_result(cls.start_time)
                    cleanup.delete_temps()
                    print("Exiting...")
                    exit(0)
                print('Resuming...')
            except KeyboardInterrupt:
                result.print_result(cls.start_time)
                cleanup.delete_temps()
                print("Exiting...")
                exit(0)
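This serial variant drives the progress display by hand: increment(1) after each finished case, then print_bar() to redraw. A bare-bones stand-in for cli.ProgressBar exposing just those two methods (the rendering details here are illustrative, not syntribos's actual output):

import sys

class ProgressBar(object):
    """Minimal progress bar with the increment/print_bar interface."""

    def __init__(self, message="", total_len=0, width=30):
        self.message = message
        self.total_len = total_len
        self.width = width
        self.done = 0

    def increment(self, by=1):
        # Advance the completed-case count, clamped to the total.
        self.done = min(self.done + by, self.total_len)

    def print_bar(self):
        # Redraw the bar in place using a carriage return.
        filled = self.width * self.done // max(self.total_len, 1)
        bar = "#" * filled + "-" * (self.width - filled)
        sys.stdout.write("\r{0} |{1}|".format(self.message, bar))
        sys.stdout.flush()

p_bar = ProgressBar(message="[1]  :  Demo", total_len=4)
for _ in range(4):
    p_bar.increment(1)
    p_bar.print_bar()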
Code example #4
File: runner.py Project: knangia/syntribos
    def run_given_tests(cls, list_of_tests, file_path, req_str,
                        meta_vars=None):
        """Loads all the templates and runs all the given tests

        This method calls run_test method to run each of the tests one
        by one.

        :param list list_of_tests: A list of all the loaded tests
        :param str file_path: Path of the template file
        :param str req_str: Request string of each template
        :param dict meta_vars: Optional dict of template meta variables

        :return: None
        """
        try:
            template_start_time = time.time()
            failures = 0
            errors = 0
            print("\n  ID \t\tTest Name      \t\t\t\t\t\t    Progress")
            for test_name, test_class in list_of_tests:
                test_class.test_id = cls.current_test_id
                cls.current_test_id += 5
                log_string = "[{test_id}]  :  {name}".format(
                    test_id=test_class.test_id, name=test_name)
                result_string = "[{test_id}]  :  {name}".format(
                    test_id=cli.colorize(
                        test_class.test_id, color="green"),
                    name=test_name.replace("_", " ").capitalize())
                if not CONF.colorize:
                    result_string = result_string.ljust(55)
                else:
                    result_string = result_string.ljust(60)
                LOG.debug(log_string)
                try:
                    test_class.send_init_request(file_path, req_str, meta_vars)
                except Exception:
                    print(_(
                        "Error in parsing template:\n %s\n"
                    ) % traceback.format_exc())
                    LOG.error(_LE("Error in parsing template:"))
                    break
                test_cases = list(
                    test_class.get_test_cases(file_path, req_str))
                if len(test_cases) > 0:
                    p_bar = cli.ProgressBar(
                        message=result_string, total_len=len(test_cases))
                    last_failures = result.stats["failures"]
                    last_errors = result.stats["errors"]
                    for test in test_cases:
                        if test:
                            cls.run_test(test, result)
                            p_bar.increment(1)
                        p_bar.print_bar()
                        failures = result.stats["failures"] - last_failures
                        errors = result.stats["errors"] - last_errors
                        total_tests = len(test_cases)
                        if failures > total_tests * 0.90:
                            # More than 90 percent failure
                            failures = cli.colorize(failures, "red")
                        elif failures > total_tests * 0.45:
                            # More than 45 percent failure
                            failures = cli.colorize(failures, "yellow")
                        elif failures > total_tests * 0.15:
                            # More than 15 percent failure
                            failures = cli.colorize(failures, "blue")
                    if errors:
                        last_failures = result.stats["failures"]
                        last_errors = result.stats["errors"]
                        errors = cli.colorize(errors, "red")
                        print(_(
                            "  :  %(fail)s Failure(s), %(err)s Error(s)\r") % {
                                "fail": failures, "err": errors})
                    else:
                        last_failures = result.stats["failures"]
                        print(
                            _(
                                "  : %s Failure(s), 0 Error(s)\r") % failures)

            run_time = time.time() - template_start_time
            LOG.info(_("Run time: %s sec."), run_time)
            if hasattr(result, "testsRun"):
                num_tests = result.testsRun - result.testsRunSinceLastPrint
                print(_("\nRan %(num)s test(s) in %(time).3f s\n") %
                      {"num": num_tests, "time": run_time})
                result.testsRunSinceLastPrint = result.testsRun

        except KeyboardInterrupt:
            print(_(
                '\n\nPausing...Hit ENTER to continue, type quit to exit.'))
            try:
                response = input()
                if response.lower() == "quit":
                    result.print_result(cls.start_time)
                    cleanup.delete_temps()
                    print(_("Exiting..."))
                    exit(0)
                print(_('Resuming...'))
            except KeyboardInterrupt:
                result.print_result(cls.start_time)
                cleanup.delete_temps()
                print(_("Exiting..."))
                exit(0)
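Examples #2 and #6 collapse the failure-ratio ladder written out inline here (red above 90 percent, yellow above 45, blue above 15) into a single cli.colorize_by_percent call. A sketch of how such a helper could encapsulate the same thresholds; the signature, the high_color parameter, and the uncolored fallback are inferences from the two call sites, not syntribos's actual API:

def _colorize(value, color):
    # Tiny local stand-in; see the colorize() sketch after example #1.
    codes = {"red": 31, "yellow": 33, "blue": 34}
    return "\033[0;{0}m{1}\033[0;m".format(codes.get(color, 0), value)

def colorize_by_percent(amount, total, high_color="red"):
    """Color `amount` by its share of `total`, mirroring the ladder above."""
    if total and amount > total * 0.90:
        return _colorize(amount, high_color)  # more than 90 percent
    if total and amount > total * 0.45:
        return _colorize(amount, "yellow")    # more than 45 percent
    if total and amount > total * 0.15:
        return _colorize(amount, "blue")      # more than 15 percent
    return str(amount)                        # low ratios stay uncolored

print(colorize_by_percent(9, 10))  # rendered red on an ANSI terminal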
Code example #5
    def test_no_colorize(self):
        CONF.no_colorize = True
        string = "No color"
        self.assertEqual(string, colorize(string))
Code example #6
File: runner.py Project: rahulunair/syntribos
    def run_given_tests(cls, list_of_tests, file_path, req_str,
                        meta_vars=None):
        """Loads all the templates and runs all the given tests

        This method dispatches each test case to the run_test method
        through a worker thread pool.

        :param list list_of_tests: A list of all the loaded tests
        :param str file_path: Path of the template file
        :param str req_str: Request string of each template
        :param dict meta_vars: Optional dict of template meta variables

        :return: None
        """
        pool = ThreadPool(CONF.syntribos.threads)
        try:
            template_start_time = time.time()
            failures = 0
            errors = 0
            print("\n  ID \t\tTest Name      \t\t\t\t\t\t    Progress")
            for test_name, test_class in list_of_tests:
                test_class.test_id = cls.current_test_id
                cls.current_test_id += 5

                result_string = "[{test_id}]  :  {name}".format(
                    test_id=cli.colorize(
                        test_class.test_id, color="green"),
                    name=test_name.replace("_", " ").capitalize())
                if not CONF.colorize:
                    result_string = result_string.ljust(55)
                else:
                    result_string = result_string.ljust(60)
                try:
                    test_class.create_init_request(file_path, req_str,
                                                   meta_vars)
                except Exception:
                    print(_(
                        "Error in parsing template:\n %s\n"
                    ) % traceback.format_exc())
                    LOG.error("Error in parsing template:")
                    break
                test_cases = list(
                    test_class.get_test_cases(file_path, req_str, meta_vars))
                total_tests = len(test_cases)
                if total_tests > 0:
                    log_string = "[{test_id}]  :  {name}".format(
                        test_id=test_class.test_id, name=test_name)
                    LOG.debug(log_string)
                    last_failures = result.stats['unique_failures']
                    last_errors = result.stats['errors']
                    p_bar = cli.ProgressBar(
                        message=result_string, total_len=total_tests)
                    test_class.send_init_request(file_path, req_str, meta_vars)

                    # This line runs the tests
                    pool.map(lambda t: cls.run_test(t, p_bar), test_cases)

                    failures = result.stats['unique_failures'] - last_failures
                    errors = result.stats['errors'] - last_errors
                    failures_str = cli.colorize_by_percent(
                        failures, total_tests)

                    if errors:
                        errors_str = cli.colorize(errors, "red")
                        print(_(
                            "  :  %(fail)s Failure(s), %(err)s Error(s)\r") % {
                                "fail": failures_str, "err": errors_str})
                    else:
                        print(_(
                            "  : %s Failure(s), 0 Error(s)\r") % failures_str)

            run_time = time.time() - template_start_time
            LOG.info(_("Run time: %s sec."), run_time)
            if hasattr(result, "testsRun"):
                num_tests = result.testsRun - result.testsRunSinceLastPrint
                print(_("\nRan %(num)s test(s) in %(time).3f s\n") %
                      {"num": num_tests, "time": run_time})
                result.testsRunSinceLastPrint = result.testsRun

        except KeyboardInterrupt:
            print(_(
                '\n\nPausing...Hit ENTER to continue, type quit to exit.'))
            try:
                response = input()
                if response.lower() == "quit":
                    result.print_result(cls.start_time)
                    cleanup.delete_temps()
                    print(_("Exiting..."))
                    pool.close()
                    pool.join()
                    exit(0)
                print(_('Resuming...'))
            except KeyboardInterrupt:
                result.print_result(cls.start_time)
                cleanup.delete_temps()
                print(_("Exiting..."))
                pool.close()
                pool.join()
                exit(0)
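Every variant shares the same two-stage Ctrl+C handling: the first interrupt pauses the run and prompts, and typing quit (or a second interrupt while paused) prints the results, removes temp files, and exits. Reduced to its control flow, the pattern looks like this sketch, where the worker loop stands in for the template run:

import time

def run_templates():
    # Stand-in for the template/test-case loop above.
    while True:
        time.sleep(0.1)

try:
    run_templates()
except KeyboardInterrupt:
    print('\n\nPausing... Hit ENTER to continue, type quit to exit.')
    try:
        if input().lower() == "quit":
            # cf. result.print_result()/cleanup.delete_temps() above
            print("Exiting...")
            raise SystemExit(0)
        print('Resuming...')
        run_templates()
    except KeyboardInterrupt:
        # A second Ctrl+C while paused also exits cleanly.
        print("Exiting...")
        raise SystemExit(0)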