Example No. 1
    def test(self):
        print("---- TESTING PROGRAM ----")
        runner = TestRunner(self.context)
        runner.run()
        self.context.tests_failed = runner.error_count
        self.context.tests_run = runner.test_count
        self.context.tests_passed = runner.success_count
Example No. 2
    def test(self):
        """Run tests."""
        test_runner = TestRunner(self.__build_targets,
                                 self.__options,
                                 self.__target_database,
                                 self.__direct_targets)
        return test_runner.run()
Example No. 3
    def test(self):
        """Run tests."""
        test_runner = TestRunner(self.all_targets_expanded,
                                 self.options,
                                 self.prebuilt_cc_library_file_map,
                                 self.target_database)
        return test_runner.run()
Example No. 4
    def test_run_test_class(self):
        """ Tests running a valid test class """

        module = TestRunner._load_test_module(self.test_path +
                                              self.valid_module)
        test_result = TestRunner._run_test_class(module)

        self.assertEqual(len(test_result.failures), 1)
        self.assertEqual(test_result.testsRun, 2)
Example No. 5
class Command(BaseCommand):
    def __init__(self, stdout=None, stderr=None, no_color=False):
        super().__init__(stdout, stderr, no_color)
        self.test_runner = TestRunner()

    def handle(self, *args, **options):
        game_id = options['game_id'][0]
        actor_id = options['actor_id'][0]
        self.test_runner.run_load_test(game_id, actor_id)

    def add_arguments(self, parser):
        parser.add_argument('game_id', nargs='+', type=str)
        parser.add_argument('actor_id', nargs='+', type=str)
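
For context (not part of the original snippet): Django discovers management commands by module path, so the command name comes from the file the class lives in. Assuming a hypothetical location such as yourapp/management/commands/load_test.py, the same command could also be driven programmatically:

    # Hypothetical invocation through Django's management API; the command
    # name "load_test" and the argument values are illustrative only.
    from django.core.management import call_command

    call_command("load_test", "game-123", "actor-456")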
Example No. 6
    def init_runner(self, path, t, num, allowed_features=None):
        # Build the topology and collect every feature its routers support.
        topo = topology.Topology(**t)
        router_features = [
            f for r in topo.routers
            for f in r.get_supported_features(allowed_features)
        ]
        runner = TestRunner(path, topo, self, router_features)
        runner.test_num = num
        # Map each feature to its possible arguments across all routers;
        # later routers overwrite earlier ones for shared features.
        possible_args = {
            f: r.get_possible_args(allowed_features)[f]
            for r in topo.routers
            for f in r.get_possible_args(allowed_features)
        }

        return runner, router_features, possible_args
Example No. 7
    def test_discover_invalid(self):
        """ Test discovery on invalid pattern """

        expected = []
        actual = TestRunner._discover(self.test_path, "cant-find-anything")

        self.assertListEqual(expected, actual)
Example No. 8
def TestConfig(config, connections, tests):
    """Set up the ample tests, then run the test for each browser/platform
    combination in separate asynchronous threads. Afterwards, email
    the test report to recipients."""

    count = 0
    CreateSubMTF(config['mtf_file_path'], connections['browsers'],
                 connections['platforms'])
    Global.tmpPath = config['tmp_file_path']
    Global.screenshotsPath = config['screenshots_path']

    BackupScreenshotsAndTestReport(
        Global.screenshotsPath)  # Back up old screenshots to backup folder

    processes = []
    for platform in connections['platforms']:
        print('Platform: %s' % platform)
        for browser in connections['browsers']:
            print('Browser: %s' % browser)
            if browser == 'internet explorer' and platform == 'linux':
                continue  # Internet Explorer is not available on Linux.
            # Start the test in a separate process.
            test_runner = TestRunner(config, connections, browser)
            p = Process(target=RunTests,
                        args=(tests, platform, browser, config,
                              connections['url'], count, test_runner))
            processes.append(p)
            p.start()  # start the test
            count += 1
    for p in processes:
        p.join()  # wait for the tests to finish before continuing

    # merge test report
    MergeTestReports(config, connections)
Example No. 9
def main():
    """
    Invocation order:
    1. Parse the command-line arguments.
    2. Parse the config file to get the configuration details.
    3. Invoke the test_list_builder to build the TC run order.
    4. Pass the details to the test_runner.
    """

    start = time.time()
    args = pars_args()

    try:
        param_obj = ParamsHandler(args.config_file)
    except IOError:
        print("Error: can't find config file or read data.")
        return

    # Building the test list and obtaining the TC details.
    test_cases_dict, test_cases_component = TestListBuilder.create_test_dict(
        args.test_dir, args.spec_test)

    # Creating log dirs. The import is deferred so that the local
    # package resolves only after sys.path has been updated.
    sys.path.insert(1, ".")
    from common.relog import Logger
    args.log_dir = f'{args.log_dir}/{datetime.datetime.now()}'
    Logger.log_dir_creation(args.log_dir, test_cases_component,
                            test_cases_dict)

    # Modify the test list before the test run.
    test_cases_dict = TestListBuilder.pre_test_run_list_modify(test_cases_dict)

    # Environment setup.
    env_obj = environ(param_obj, args.log_dir + "/main.log", args.log_level)
    env_obj.setup_env()

    # Invoke the test_runner.
    TestRunner.init(test_cases_dict, param_obj, args.log_dir, args.log_level,
                    args.concur_count)
    result_queue = TestRunner.run_tests()

    # Environment cleanup. TBD.
    total_time = time.time() - start
    ResultHandler.handle_results(result_queue, args.result_path, total_time,
                                 args.excel_sheet)
Example No. 10
    def _run_test(self, test_cases, report):
        """
        Run the test suites and return the report
        """

        if self._args.verbose:
            verbosity = TestRunner.VERBOSITY_VERBOSE
        elif self._args.quiet:
            verbosity = TestRunner.VERBOSITY_QUIET
        else:
            verbosity = TestRunner.VERBOSITY_NORMAL

        runner = TestRunner(report,
                            verbosity=verbosity,
                            num_threads=self._args.num_threads,
                            fail_fast=self._args.fail_fast,
                            dont_catch_exceptions=self._args.dont_catch_exceptions,
                            no_color=self._args.no_color)
        runner.run(test_cases)
Example No. 11
    def test_discover_valid(self):
        """ Test discovery on valid pattern """

        expected = [
            self.test_path + x
            for x in [self.valid_module, self.invalid_module]
        ]
        actual = TestRunner._discover(self.test_path,
                                      TestRunner.DEFAULT_PATTERN)

        self.assertListEqual(expected, actual)
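
These two discovery tests imply that TestRunner._discover scans a directory for file names matching a glob-style pattern. A minimal sketch of such a helper, purely illustrative since the real implementation is not shown in these examples:

    import fnmatch
    import os

    def _discover(test_path, pattern):
        # Hypothetical helper: return test_path-prefixed names of entries
        # whose file names match the glob-style pattern.
        return sorted(
            test_path + name
            for name in os.listdir(test_path)
            if fnmatch.fnmatch(name, pattern)
        )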
Example No. 12
class Command(BaseCommand):
    def __init__(self, stdout=None, stderr=None, no_color=False):
        super().__init__(stdout, stderr, no_color)
        self.test_runner = TestRunner()

    def handle(self, *args, **options):
        run_count = 50
        user_count = 4
        print("Starting stress test: {} runs for {} users.".format(
            run_count, user_count))
        start = datetime.utcnow()
        self.run_stress_test(run_count, user_count)
        elapsed = datetime.utcnow() - start
        print("Done! {} runs for {} users took {}s.".format(
            run_count, user_count, elapsed.total_seconds()))

    def run_stress_test(self, count, user_count):
        logger.enabled = False
        for current_run in range(1, 1 + count):
            start = datetime.utcnow()
            self.test_runner.run_create_test(user_count)
            elapsed = datetime.utcnow() - start
            print("> Finished run {} in {}s.".format(current_run,
                                                     elapsed.total_seconds()))
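
A portability note on this example: datetime.utcnow() is deprecated as of Python 3.12. A timezone-aware equivalent of the timing pattern used above:

    from datetime import datetime, timezone

    # Aware UTC timestamps; subtracting two of them still yields a timedelta.
    start = datetime.now(timezone.utc)
    elapsed = datetime.now(timezone.utc) - start
    print("took {}s".format(elapsed.total_seconds()))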
Example No. 13
def main():
    """
    Invocation order:
    1. Parse the command-line arguments.
    2. Parse the config file to get the configuration details.
    3. Invoke the test_list_builder to build the TC run order.
    4. Pass the details to the test_runner.
    """

    start = time.time()
    args = pars_args()

    if args.show_backtrace:
        def errer(exc, msg=None):
            raise exc
    else:
        def errer(exc, msg=None):
            if not msg:
                msg = "error: {exc}"
            print(msg.format(exc=exc), file=sys.stderr)
            sys.exit(1)

    spinner = Halo(spinner='dots')
    spinner.start("Starting param handling")
    try:
        param_obj = ParamsHandler(args.config_file)
    except OSError as e:
        spinner.fail("error in param handling")
        errer(e, "Error on loading config file: {exc}")
    spinner.succeed("Param Handling Success.")

    spinner.start("Building test list")
    # Building the test list and obtaining the TC details.
    excluded_result = param_obj.get_excluded_tests()
    if not excluded_result[1]:
        spinner.fail("Error in exclude list. Invalid path present")
        sys.exit(1)

    excluded_tests = excluded_result[0]
    spec_test = (args.test_dir.endswith(".py")
                 and args.test_dir.split("/")[-1].startswith("test"))
    try:
        TestListBuilder.create_test_dict(args.test_dir, excluded_tests,
                                         spec_test)
    except FileNotFoundError as e:
        spinner.fail("FileNotFoundError in test list builder")
        errer(e, "Error: Can't find the file")
    spinner.succeed("Test List built")

    spinner.start("Creating log dirs")
    # Creating log dirs.
    current_time_rep = str(datetime.datetime.now())
    log_dir_current = f"{args.log_dir}/{current_time_rep}"
    Logger.log_dir_creation(log_dir_current,
                            TestListBuilder.get_test_path_list())
    # Atomically update the "latest" symlink: create it under a
    # temporary name, then rename it over any existing link.
    latest = 'latest'
    tmplink = f"{args.log_dir}/{latest}.{current_time_rep}"
    os.symlink(current_time_rep, tmplink)
    os.rename(tmplink, f"{args.log_dir}/{latest}")
    spinner.succeed("Log dir creation successful.")

    # Framework environment data structure.
    env_obj = FrameworkEnv()
    env_obj.init_ds()

    # Environment setup.
    env_set = environ(param_obj, env_obj, errer, f"{log_dir_current}/main.log",
                      args.log_level)
    logger_obj = env_set.get_framework_logger()
    logger_obj.debug("Running env setup.")
    env_set.setup_env(args.keep_logs)

    # Invoke the test_runner.
    logger_obj.debug("Running the test cases.")
    TestRunner.init(TestListBuilder, param_obj, env_set, log_dir_current,
                    args.log_level, args.concur_count, spec_test)
    result_queue = TestRunner.run_tests(env_obj)
    logger_obj.debug("Collected test results queue.")

    # Environment cleanup. TBD.
    total_time = time.time() - start

    # Set up the result handling.
    if args.excel_sheet is None:
        handle_results(result_queue, total_time, logger_obj)
    else:
        handle_results(result_queue, total_time, logger_obj, args.excel_sheet)

    logger_obj.debug("Starting env teardown.")
    env_set.teardown_env()
Example No. 14
def run_test(run_test_method):
    with TestRunner(run_test_method) as runner:
        return runner.run()
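
The with-statement here implies that this TestRunner implements the context-manager protocol. A minimal sketch of such a class, assuming nothing beyond what the snippet itself uses:

    class TestRunner:
        def __init__(self, run_test_method):
            self.run_test_method = run_test_method

        def __enter__(self):
            # Acquire whatever resources a run needs, then hand back the runner.
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            # Release resources; returning False propagates any exception.
            return False

        def run(self):
            return self.run_test_method()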
Example No. 15
    def run(self, target):
        """Run the target."""
        key = self._get_normpath_target(target)
        runner = TestRunner(self.all_targets_expanded, self.options)
        return runner.run_target(key)
Example No. 16
    def __init__(self, stdout=None, stderr=None, no_color=False):
        super().__init__(stdout, stderr, no_color)
        self.test_runner = TestRunner()
Example No. 17
import os
import sys
from test_runner import TestRunner

__author__ = 'mbugaiov'
__copyright__ = "Copyright (C) 2017 Quest, Inc.  All rights reserved"

#if os.getuid() != 0:
#    sys.exit("Please run script as root.")
ver = sys.version.split()[0]
if ver[0:3] != "2.7":
    sys.exit("Please use python version 2.7")

if __name__ == '__main__':
    test_obj = TestRunner()
    test_obj.run_tests()
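
A side note on the version guard above: slicing sys.version is fragile. Comparing sys.version_info tuples (a suggested alternative, not part of the original) is more robust:

    import sys

    # Compare (major, minor) as integers instead of slicing the version string.
    if sys.version_info[:2] != (2, 7):
        sys.exit("Please use python version 2.7")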
Example No. 18
    def test_load_module_valid(self):
        """ Tests loading a valid test module """

        actual = TestRunner._load_test_module(self.test_path +
                                              self.valid_module)
        self.assertIsNotNone(actual)
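
As with _discover, the module loader is only exercised, never shown. One plausible shape for _load_test_module, offered purely as an assumption about the tested code:

    import importlib.util

    def _load_test_module(path):
        # Hypothetical loader: import a module from a file path and return
        # it, or None if no import spec can be built for the path.
        spec = importlib.util.spec_from_file_location("test_module", path)
        if spec is None:
            return None
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module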