Example No. 1
 def run(self):  # pylint: disable=E0202
     """ invoked by the command """
     tests = unittest.TestLoader().discover('src/test', pattern='test*.py')
     result = XMLTestRunner(output='test-reports').run(tests)
     if result.wasSuccessful():
         return 0
     return 1
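This snippet, like the similar run() methods in Examples 5, 8, and 30, appears to be the run() method of a custom setuptools test command. A minimal, self-contained sketch of how such a command might be registered in setup.py; the RunTests name, the package name, and the src/test layout are assumptions, not details of the original project:

import unittest
from setuptools import Command, setup
from xmlrunner import XMLTestRunner

class RunTests(Command):
    """Hypothetical command: run the suite and write XML reports to test-reports/."""
    user_options = []  # this sketch takes no command-line options

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        tests = unittest.TestLoader().discover('src/test', pattern='test*.py')
        result = XMLTestRunner(output='test-reports').run(tests)
        raise SystemExit(0 if result.wasSuccessful() else 1)

setup(name='example-package', cmdclass={'test_xml': RunTests})

Invoked as `python setup.py test_xml`, a failing suite makes the command exit non-zero, which is what CI systems key off.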
Example No. 2
    def configure(self, options):
        '''configure before testing'''
        self.test_options = options
        if options.xunit:
            try:
                from xmlrunner import XMLTestRunner
            except ImportError:
                raise Exception("unittest-xml-reporting not installed")
            self.runner = XMLTestRunner(stream=sys.stderr, \
                                        verbosity=2, output=options.xunit)
        else:
            self.runner = unittest.TextTestRunner(stream=sys.stderr, \
                                                  verbosity=2)

        if options.manifest:
            fbname, fext = os.path.splitext(os.path.basename(options.manifest))
            assert fbname == "manifest" or fext == ".manifest", \
                  "Please specify file name like xxx.manifest or manifest.xxx"
            self.tclist = self.__get_tc_from_manifest(options.manifest)

        if options.tests:
            tcs = [t[0:-3] if t.endswith(".py") else t[0:-1] \
                               if t.endswith("/") else t for t in options.tests]
            self.tclist.extend([tc.replace("/", ".") for tc in tcs])

        if options.logdir:
            logdir = self._get_log_dir(options.logdir)
            self.log_handler = LogHandler(logdir)

        try:
            self.context.def_timeout = int(options.timeout)
        except ValueError:
            print "timeout need an integer value"
            raise
Example No. 3
def main(useXMLRunner=True):
    # load hard-coded tests
    unitsuite = unittest.makeSuite(UnitTests)
    modelsuite = unittest.makeSuite(ModelTests)
    funsuite = unittest.makeSuite(FunctionalTests)

    # load recorded tests
    os.chdir('test/recordedtests')
    tests = glob.glob("recordedtest_*.py")  # get all the recorded tests
    for test in tests:
        mod, ext = os.path.splitext(os.path.split(test)[-1])
        m = __import__(mod)  # dynamic import magic
        testname = "testMethod" + mod[-3:]  # recover test number
        setattr(RecordedTests, testname, m.testMethod)
    os.chdir('../..')
    recsuite = unittest.makeSuite(RecordedTests)

    # combine and run tests
    alltests = unittest.TestSuite([unitsuite, modelsuite, funsuite, recsuite])
    if useXMLRunner:
        stream = file("testresults.xml", "w")
        runner = XMLTestRunner(stream)
        result = runner.run(alltests)
        stream.close()
    else:
        runner = unittest.TextTestRunner(verbosity=2)
        result = runner.run(alltests)
    return result
Example No. 4
def main():
    # type: (*str) -> int
    """
    Run tests given in models.

    Returns 0 on success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    parser = argparse.ArgumentParser(description="Test SasModels Models")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_const",
                        default=1,
                        const=2,
                        help="Use verbose output")
    parser.add_argument("-e",
                        "--engine",
                        default="all",
                        help="Engines on which to run the test.  "
                        "Valid values are opencl, cuda, dll, and all. "
                        "Defaults to all if no value is given")
    parser.add_argument("models",
                        nargs="*",
                        help="The names of the models to be tested.  "
                        "If the first model is 'all', then all but the listed "
                        "models will be tested.  See core.list_models() for "
                        "names of other groups, such as 'py' or 'single'.")
    opts = parser.parse_args()

    if opts.engine == "opencl":
        if not use_opencl():
            print("opencl is not available")
            return 1
        loaders = ['opencl']
    elif opts.engine == "dll":
        loaders = ["dll"]
    elif opts.engine == "cuda":
        if not use_cuda():
            print("cuda is not available")
            return 1
        loaders = ['cuda']
    elif opts.engine == "all":
        loaders = ['dll']
        if use_opencl():
            loaders.append('opencl')
        if use_cuda():
            loaders.append('cuda')
    else:
        print("unknown engine " + opts.engine)
        return 1

    runner = TestRunner(verbosity=opts.verbose, **test_args)
    result = runner.run(make_suite(loaders, opts.models))
    return 1 if result.failures or result.errors else 0
Example No. 5
 def run(self, ):
     loader = TestLoader()
     tests = loader.discover('.', 'test_*.py')
     t = XMLTestRunner(verbosity=1, output=self.TEST_RESULTS)
     res = t.run(tests)
     if not res.wasSuccessful():
         raise FailTestException()
Example No. 6
def run_tests ():
    suite = unittest.TestSuite ()
    loader = unittest.TestLoader ()

    suite.addTest (loader.loadTestsFromModule (CsvToolTester))

    runner = XMLTestRunner(file('testoutput.xml', "w"))
    result = runner.run(suite)
Example No. 7
 def self_valid():
     self_test_suite = unittest.TestSuite()
     self_test_suite.addTests(
         loader.loadTestsFromTestCase(TestCheckOpiFormatMethods))
     runner = XMLTestRunner(output=os.path.join(logs_dir,
                                                "check_opi_format"),
                            stream=sys.stdout)
     return runner.run(self_test_suite).wasSuccessful()
Example No. 8
 def run_tests(self):
     # If we perform this import at the top of the file, we get an
     # import error because we need to load this file to discover
     # dependencies.
     from xmlrunner import XMLTestRunner
     tests = TestLoader().discover('tests', pattern='test_*.py')
     runner = XMLTestRunner(output='reports')
     result = runner.run(tests)
     exit(0 if result.wasSuccessful() else 1)
Example No. 9
def run_instrument_tests(inst_name, reports_path):
    """
    Runs the test suite
    :param inst_name: The name of the instrument to run tests on,
                    used to sort the test reports folder into instrument-specific reports
    :param reports_path: The path to store test reports
    :return: True if the tests passed, False otherwise
    """
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()

    for case in [
            ScriptingDirectoryTests, GlobalsTests, VersionTests,
            ConfigurationsSingleTests, ComponentsSingleTests
    ]:
        suite.addTests(loader.loadTestsFromTestCase(case))

    # Add configs test suite a dynamic number of times with an argument of the config name.
    # unittest's test loader is unable to take arguments to test classes by default so have
    # to use the getTestCaseNames() syntax and explicitly add the argument ourselves.

    try:
        configs = ConfigurationUtils(
            Settings.config_repo_path).get_configurations_as_list()
        components = ComponentUtils(
            Settings.config_repo_path).get_configurations_as_list()
        synoptics = SynopticUtils(
            Settings.config_repo_path).get_synoptics_filenames()
    except IOError as e:
        print(
            "Failed to build tests for instrument {}: exception occured while generating tests."
            .format(inst_name))
        traceback.print_exc(e)
        return False

    for config in configs:
        suite.addTests([
            ConfigurationsTests(test, config)
            for test in loader.getTestCaseNames(ConfigurationsTests)
        ])

    for component in components:
        suite.addTests([
            ComponentsTests(test, component)
            for test in loader.getTestCaseNames(ComponentsTests)
        ])

    for synoptic in synoptics:
        suite.addTests([
            SynopticTests(test, synoptic)
            for test in loader.getTestCaseNames(SynopticTests)
        ])

    runner = XMLTestRunner(output=str(os.path.join(reports_path, inst_name)),
                           stream=sys.stdout)
    return runner.run(suite).wasSuccessful()
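The comment above spells out a common workaround: unittest's loader cannot pass constructor arguments, so each test method is instantiated by name and the extra argument is supplied by hand. A minimal sketch of the TestCase side of that pattern; ParametrizedConfigTest is a hypothetical stand-in, not the project's ConfigurationsTests:

import unittest

class ParametrizedConfigTest(unittest.TestCase):
    def __init__(self, methodName, config=None):
        # unittest supplies the test method name; the extra argument is stashed on self.
        super(ParametrizedConfigTest, self).__init__(methodName)
        self.config = config

    def test_config_has_name(self):
        self.assertTrue(self.config)

def build_suite(configs):
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for config in configs:
        suite.addTests(ParametrizedConfigTest(name, config)
                       for name in loader.getTestCaseNames(ParametrizedConfigTest))
    return suite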
Example No. 10
def main():
    # type: () -> int
    """
    Run tests given in sys.argv.

    Returns 0 on success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = { 'output': 'logs' }
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = { }

    models = sys.argv[1:]
    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both OpenCL and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all except the remaining models will be tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0
Example No. 11
def discover_and_run_tests(test_dir, f_pattern, out_dir):

    loader = unittest.TestLoader()
    tests = loader.discover(start_dir=test_dir,
                            pattern=f_pattern,
                            top_level_dir='.')
    runner = XMLTestRunner(
        output=out_dir,
        stream=sys.stdout,
        outsuffix=datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f"))
    runner.run(tests)
Example No. 12
def main():
    if not os.path.isfile('run_tests.py'):
        os.chdir(SCRIPT_DIR)
    runner = XMLTestRunner(verbosity=2, output='results')
    tests = unittest.TestSuite()
    tests.addTest(test_arguments.ArgumentsTest())
    result = runner.run(tests)
    if result.wasSuccessful():
        return 0
    else:
        return 1
Example No. 13
def run_tests(device_name='', use_xmlrunner=False):
    """
    Executes the unit tests specified by the test suite.
    This should be called from CMake.
    """
    test_result = unittest.TestResult()
    if use_xmlrunner and 'XMLTestRunner' in globals():
        test_runner = XMLTestRunner(verbosity=2)
    else:
        test_runner = unittest.TextTestRunner(verbosity=2)
    test_result = test_runner.run(get_test_suite(device_name))
    return test_result
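The 'XMLTestRunner' in globals() check only makes sense if the import itself is optional. A plausible module-level guard, shown here as an assumption rather than the project's actual header:

import unittest

try:
    from xmlrunner import XMLTestRunner
except ImportError:
    # Leave XMLTestRunner undefined; run_tests() then falls back to TextTestRunner.
    pass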
Example No. 14
def main():
    """
    Run tests given in sys.argv.

    Returns 0 on success or 1 if any tests fail.
    """
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = { 'output': 'logs' }
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = { }

    models = sys.argv[1:]
    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    if models and models[0] == 'opencl':
        if not HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll']
    if not models:
        print("""\
usage:
  python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both opencl and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all except the remaining models will be tested.

""")

        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0
Example No. 15
def run_tests():
    # type: () -> None
    """Run all the tests."""

    # mypy's typeshed does not have defaultTestLoader and TestLoader type
    # information, so suppress mypy type checking.
    all_tests = unittest.defaultTestLoader.discover(start_dir="tests")  # type: ignore

    runner = XMLTestRunner(verbosity=2, failfast=False, output='results')
    result = runner.run(all_tests)

    sys.exit(not result.wasSuccessful())
Example No. 16
def run_tests():
    # type: () -> None
    """Run all the tests."""

    # mypy's typeshed does not have defaultTestLoader and TestLoader type
    # information, so suppress mypy type checking.
    all_tests = unittest.defaultTestLoader.discover(start_dir="tests")  # type: ignore

    runner = XMLTestRunner(verbosity=2, failfast=False, output='results')
    result = runner.run(all_tests)

    sys.exit(not result.wasSuccessful())
Example No. 17
def run_tests_on_pages(reports_path, pages, wiki_dir, highest_issue_num, test_class):
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()

    # Add spelling test suite a dynamic number of times with an argument of the page name.
    # unittest's test loader is unable to take arguments to test classes by default so have
    # to use the getTestCaseNames() syntax and explicitly add the argument ourselves.
    for page in pages:
        suite.addTests([test_class(test, IGNORED_ITEMS, (page, pages, wiki_dir, highest_issue_num))
                        for test in loader.getTestCaseNames(test_class)])

    runner = XMLTestRunner(output=str(reports_path), stream=sys.stdout)
    return runner.run(suite).wasSuccessful()
Example No. 18
def main(args):
    """
  Main function which runs the tests and generates the result file.

  :return: result value for the CI environments
  :rtype: int
  """
    # Print header
    tstart = time.time()
    log.info("Start ESCAPE tests")
    log.info("-" * 70)
    if args.timeout:
        log.info("Set kill timeout for test cases: %ds\n" % args.timeout)
    # Create overall test suite
    test_suite = create_test_suite(tests_dir=CWD,
                                   show_output=args.show_output,
                                   run_only_tests=args.testcases,
                                   kill_timeout=args.timeout,
                                   standalone=args.standalone)
    sum_test_cases = test_suite.countTestCases()
    log.info("-" * 70)
    log.info("Read %d test cases" % sum_test_cases)
    # Run test suite in the specific context
    results = None
    if args.verbose:
        output_context_manager = Tee(filename=REPORT_FILE)
    else:
        output_context_manager = open(REPORT_FILE, 'w', buffering=0)
    with output_context_manager as output:
        # Create the Runner class which runs the test cases collected in a
        # TestSuite object
        if args.failfast:
            log.info("Using failfast mode!")
        test_runner = XMLTestRunner(output=output,
                                    verbosity=2,
                                    failfast=args.failfast)
        try:
            # Run the test cases and collect the results
            if sum_test_cases:
                results = test_runner.run(test_suite)
        except KeyboardInterrupt:
            log.warning("\nReceived KeyboardInterrupt! "
                        "Abort running main test suite...")
    # Print footer
    log.info("-" * 70)
    delta = time.time() - tstart
    log.info("Total elapsed time: %s sec" % timedelta(seconds=delta))
    log.info("-" * 70)
    log.info("End ESCAPE tests")
    # Evaluate results values
    return results.wasSuccessful() if results is not None else False
Example No. 19
def main():
    from unittest import TestLoader, TestSuite
    from xmlrunner import XMLTestRunner

    import db_checks

    suites = [
        TestLoader().loadTestsFromTestCase(db_checks.Test),
    ]

    tests_results_dir = "test-reports-python"
    result = XMLTestRunner(output=tests_results_dir).run(TestSuite(suites))
    if not result.wasSuccessful():
        sys.exit(1)
Example No. 20
def main(args):
    """
  Main function which runs the tests and generates the result file.

  :return: result value for the CI environments
  :rtype: int
  """
    # Print header
    log.info("Start ESCAPE test")
    log.info("-" * 70)
    if args.timeout:
        log.info("Set kill timeout for test cases: %ds\n" % args.timeout)
    # Create overall test suite
    test_suite = create_test_suite(tests_dir=CWD,
                                   show_output=args.show_output,
                                   run_only_tests=args.testcases,
                                   kill_timeout=args.timeout,
                                   standalone=args.standalone)
    sum_test_cases = test_suite.countTestCases()
    log.info("-" * 70)
    log.info("Read %d test cases" % sum_test_cases)
    if not sum_test_cases:
        # Footer
        log.info("-" * 70)
        log.info("End ESCAPE test")
        return 0
    # Run test suite in the specific context
    results = []
    if args.verbose:
        output_context_manager = Tee(filename=REPORT_FILE)
    else:
        output_context_manager = open(REPORT_FILE, 'w', buffering=0)
    with output_context_manager as output:
        # Create the Runner class which runs the test cases collected in a
        # TestSuite object
        test_runner = XMLTestRunner(output=output,
                                    verbosity=2,
                                    failfast=args.failfast)
        try:
            # Run the test cases and collect the results
            results.append(test_runner.run(test_suite))
        except KeyboardInterrupt:
            log.warning(
                "\n\nReceived KeyboardInterrupt! Abort running test suite...")
    # Evaluate results values
    was_success = all(map(lambda res: res.wasSuccessful(), results))
    # Print footer
    log.info("-" * 70)
    log.info("End ESCAPE test")
    return 0 if was_success else 1
Example No. 21
def main():
    if not os.path.isfile('run_tests.py'):
        print('Please execute from a `tests` directory!')
        return 1
    else:
        runner = XMLTestRunner(verbosity=2, output='results')
        # tests = unittest.TestLoader().discover('.', pattern='test*.py')
        tests = unittest.TestSuite()
        tests.addTest(test_scenarios.RequestsTesingUsingScenarios())
        tests.addTest(test_arguments.ArgumentsTest())
        result = runner.run(tests)
        if result.wasSuccessful():
            return 0
        else:
            return 1
Example No. 22
def test_main(args=[]):
    test_classes = [
        PingTest, LookupUsersTest, NamespaceTest, BlockChains, Prices,
        NamesOwnedTest, NameHistoryTest, AuthInternal, BlockChainsInternal,
        Zonefiles, WalletInternal, NodeInternal
    ]

    test_map = {}
    for t in test_classes:
        test_map[t.__name__] = t

    with test_support.captured_stdout() as out:
        try:
            test_support.run_unittest(PingTest)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
    out = out.getvalue()
    if out[-3:-1] != "OK":
        print(out)
        print(
            "Failure of the ping test means the rest of the unit tests will " +
            "fail. Is the blockstack api daemon running? (did you run " +
            "`blockstack api start`)")
        sys.exit(1)

    if len(args) == 1 and args[0] == "--list":
        print("Tests supported: ")
        for testname in test_map.keys():
            print(testname)
        return

    test_runner = test_support.run_unittest

    if "--xunit-path" in args:
        ainx = args.index("--xunit-path")
        del args[ainx]
        from xmlrunner import XMLTestRunner
        test_runner = XMLTestRunner(output=args[ainx]).run
        del args[ainx]

    if "--api_password" in args:
        ainx = args.index("--api_password")
        del args[ainx]
        global API_PASSWORD
        API_PASSWORD = args[ainx]
        del args[ainx]

    if len(args) == 0 or args[0] == "--all":
        args = [testname for testname in test_map.keys()]

    test_suite = unittest.TestSuite()
    for test_name in args:
        test_suite.addTest(unittest.TestLoader().loadTestsFromTestCase(
            test_map[test_name]))
    result = test_runner(test_suite)
    if result:  # test_support.run_unittest returns None
        if result.wasSuccessful():
            sys.exit(0)
        else:
            sys.exit(1)
Example No. 23
def createTestRunner():
	opts, _ = getopt(sys.argv[1:], '', ['xml='])
	for o, v in opts:
		if o == '--xml':
			from xmlrunner import XMLTestRunner
			return XMLTestRunner(output=open(v, 'wb'))
	return TextTestRunner()
Example No. 24
    def configure(self, options):
        '''configure before testing'''
        self.test_options = options
        if options.xunit:
            try:
                from xmlrunner import XMLTestRunner
            except ImportError:
                raise Exception("unittest-xml-reporting not installed")
            self.runner = XMLTestRunner(stream=sys.stderr, \
                                        verbosity=2, output=options.xunit)
        else:
            self.runner = unittest.TextTestRunner(stream=sys.stderr, \
                                                  verbosity=2)

        if options.manifest:
            fbname, fext = os.path.splitext(os.path.basename(options.manifest))
            assert fbname == "manifest" or fext == ".manifest", \
                  "Please specify file name like xxx.manifest or manifest.xxx"
            self.tclist = self.__get_tc_from_manifest(options.manifest)

        if options.tests:
            tcs = [t[0:-3] if t.endswith(".py") else t[0:-1] \
                               if t.endswith("/") else t for t in options.tests]
            self.tclist.extend([tc.replace("/", ".") for tc in tcs])

        if options.logdir:
            logdir = self._get_log_dir(options.logdir)
            self.log_handler = LogHandler(logdir)

        try:
            self.context.def_timeout = int(options.timeout)
        except ValueError:
            print "timeout need an integer value"
            raise
Example No. 25
def main(useXMLRunner=True):
    # load hard-coded tests
    unitsuite = unittest.makeSuite(UnitTests)
    modelsuite = unittest.makeSuite(ModelTests)
    funsuite = unittest.makeSuite(FunctionalTests)

    # combine and run tests
    alltests = unittest.TestSuite([unitsuite, modelsuite, funsuite])
    if useXMLRunner:
        stream = file("testresults.xml", "w")
        runner = XMLTestRunner(stream)
        result = runner.run(alltests)
        stream.close()
    else:
        runner = unittest.TextTestRunner(verbosity=2)
        result = runner.run(alltests)
    return result
Example No. 26
def run_self_tests(reports_path):
    """
    Runs our own unit tests.
    :return: True if all tests passed, False otherwise
    """
    print("Running self-tests...")
    suite = unittest.TestLoader().discover(os.path.join("util", "test_utils"))
    return XMLTestRunner(output=str(reports_path),
                         stream=sys.stdout).run(suite).wasSuccessful()
Example No. 27
def main(useXMLRunner=True):
    # load hard-coded tests
    # unitsuite = unittest.makeSuite(UnitTests)
    # modelsuite = unittest.makeSuite(ModelTests)
    funsuite = unittest.makeSuite(FunctionalTests)

    # combine and run tests
    # alltests = unittest.TestSuite([unitsuite, modelsuite, funsuite])
    alltests = unittest.TestSuite([funsuite])
    if useXMLRunner:
        stream = file("testresults.xml", "w")
        runner = XMLTestRunner(stream)
        result = runner.run(alltests)
        stream.close()
    else:
        runner = unittest.TextTestRunner(verbosity=2)
        result = runner.run(alltests)
    return result
Example No. 28
def runtests():
    suite = unittest.defaultTestLoader.loadTestsFromNames(['tests.unit.avs_result_test',
                                                           'tests.unit.cvv_result_test',
                                                           'tests.unit.credit_card_tests',
                                                           'tests.unit.gateways.authorize_net_tests',
                                                           'tests.unit.gateways.bogus_tests',
                                                           'tests.unit.gateways.braintree_blue_tests',
                                                           'tests.unit.gateways.braintree_orange_tests',
                                                           'tests.unit.gateways.cybersource_tests',
                                                           'tests.unit.gateways.payflow_tests',
                                                           'tests.unit.gateways.paymentech_orbital_tests',])
    if os.environ.get('XML_OUTPUT', False):
        from xmlrunner import XMLTestRunner
        runner = XMLTestRunner()
    else:
        runner = unittest.TextTestRunner(verbosity=1, descriptions=False)
    result = runner.run(suite).wasSuccessful()
    exit_code = 0 if result else 1
    sys.exit(exit_code)
Example No. 29
    def run(self):
        # Installing required packages, running egg_info and build_ext are
        # part of normal operation for setuptools.command.test.test
        if self.distribution.install_requires:
            self.distribution.fetch_build_eggs(
                self.distribution.install_requires)
        if self.distribution.tests_require:
            self.distribution.fetch_build_eggs(self.distribution.tests_require)
        if self.xunit_output:
            if sys.version_info[:2] == (2, 6):
                self.distribution.fetch_build_eggs(
                    ["unittest-xml-reporting>=1.14.0,<2.0.0a0"])
            else:
                self.distribution.fetch_build_eggs(["unittest-xml-reporting"])
        self.run_command('egg_info')
        build_ext_cmd = self.reinitialize_command('build_ext')
        build_ext_cmd.inplace = 1
        self.run_command('build_ext')

        # Construct a TextTestRunner directly from the unittest imported from
        # test (this will be unittest2 under Python 2.6), which creates a
        # TestResult that supports the 'addSkip' method. setuptools will by
        # default create a TextTestRunner that uses the old TestResult class,
        # resulting in DeprecationWarnings instead of skipping tests under 2.6.
        from test import unittest, PymongoTestRunner, test_cases
        if self.test_suite is None:
            all_tests = unittest.defaultTestLoader.discover(self.test_module)
            suite = unittest.TestSuite()
            suite.addTests(
                sorted(test_cases(all_tests), key=lambda x: x.__module__))
        else:
            suite = unittest.defaultTestLoader.loadTestsFromName(
                self.test_suite)
        if self.xunit_output:
            from xmlrunner import XMLTestRunner
            runner = XMLTestRunner(verbosity=2,
                                   failfast=self.failfast,
                                   output=self.xunit_output)
        else:
            runner = PymongoTestRunner(verbosity=2, failfast=self.failfast)
        result = runner.run(suite)
        sys.exit(not result.wasSuccessful())
Example No. 30
    def run(self, ):

        # Perform imports in run to avoid test dependencies in setup
        from xmlrunner import XMLTestRunner
        import coverage
        from unittest import TestLoader

        loader = TestLoader()
        tests = loader.discover('.', 'test_*.py')
        t = XMLTestRunner(verbosity=1, output=self.TEST_RESULTS)

        cov = coverage.Coverage(
            omit=['*/tests/', 'test_*.py', ],
            source=self.MODULE_NAMES,
        )
        cov.start()
        t.run(tests)
        cov.stop()
        cov.save()
        cov.xml_report(outfile=self.COVERAGE_RESULTS)
Example No. 31
    def run(self):
        # Installing required packages, running egg_info and build_ext are
        # part of normal operation for setuptools.command.test.test
        if self.distribution.install_requires:
            self.distribution.fetch_build_eggs(
                self.distribution.install_requires)
        if self.distribution.tests_require:
            self.distribution.fetch_build_eggs(self.distribution.tests_require)
        if self.xunit_output:
            if sys.version_info[:2] == (2, 6):
                self.distribution.fetch_build_eggs(
                    ["unittest-xml-reporting>=1.14.0,<2.0.0a0"])
            else:
                self.distribution.fetch_build_eggs(["unittest-xml-reporting"])
        self.run_command('egg_info')
        build_ext_cmd = self.reinitialize_command('build_ext')
        build_ext_cmd.inplace = 1
        self.run_command('build_ext')

        # Construct a TextTestRunner directly from the unittest imported from
        # test (this will be unittest2 under Python 2.6), which creates a
        # TestResult that supports the 'addSkip' method. setuptools will by
        # default create a TextTestRunner that uses the old TestResult class,
        # resulting in DeprecationWarnings instead of skipping tests under 2.6.
        from test import unittest, PymongoTestRunner, test_cases
        if self.test_suite is None:
            all_tests = unittest.defaultTestLoader.discover(self.test_module)
            suite = unittest.TestSuite()
            suite.addTests(sorted(test_cases(all_tests),
                                  key=lambda x: x.__module__))
        else:
            suite = unittest.defaultTestLoader.loadTestsFromName(
                self.test_suite)
        if self.xunit_output:
            from xmlrunner import XMLTestRunner
            runner = XMLTestRunner(verbosity=2, failfast=self.failfast,
                                   output=self.xunit_output)
        else:
            runner = PymongoTestRunner(verbosity=2, failfast=self.failfast)
        result = runner.run(suite)
        sys.exit(not result.wasSuccessful())
Example No. 32
def runtests():
    suite = unittest.defaultTestLoader.loadTestsFromNames([
        'tests.unit.avs_result_test',
        'tests.unit.cvv_result_test',
        'tests.unit.credit_card_tests',
        'tests.unit.gateways.authorize_net_tests',
        'tests.unit.gateways.bogus_tests',
        'tests.unit.gateways.braintree_blue_tests',
        'tests.unit.gateways.braintree_orange_tests',
        'tests.unit.gateways.cybersource_tests',
        'tests.unit.gateways.payflow_tests',
        'tests.unit.gateways.paymentech_orbital_tests',
    ])
    if os.environ.get('XML_OUTPUT', False):
        from xmlrunner import XMLTestRunner
        runner = XMLTestRunner()
    else:
        runner = unittest.TextTestRunner(verbosity=1, descriptions=False)
    result = runner.run(suite).wasSuccessful()
    exit_code = 0 if result else 1
    sys.exit(exit_code)
Example No. 33
 def runTest(self, isSilent=False):
     '''
     if isSilent is False then sys.stderr will be used as output of result.
     Otherwise output will be saved to the file.
     Read documentation of TextTestRunner for more information.
     '''
     paramDic = {'stream': None, 'verbosity': 2, 'output': 'test-reports'}
     if isSilent:
        paramDic['stream'] = open(self.outLogFile, 'w')
     suite = unittest.TestLoader().loadTestsFromTestCase(self.cTest)
     self.result = XMLTestRunner(**paramDic).run(suite)
     if paramDic['stream']:
        paramDic['stream'].close()
     self.checkResult(isSilent)
Example No. 34
    def _run_test_case(self, suits):
        """
        run test case
        """
        if self.debug is False:
            for filename in os.listdir(os.getcwd()):
                if filename == "reports":
                    break
            else:
                os.mkdir(os.path.join(os.getcwd(), "reports"))

            if self.report is None:
                now = time.strftime("%Y_%m_%d_%H_%M_%S")
                report_path = os.path.join(os.getcwd(), "reports",
                                           now + "_result.html")
                BrowserConfig.report_path = report_path
            else:
                report_path = os.path.join(os.getcwd(), "reports", self.report)

            with (open(report_path, 'wb')) as fp:
                log.info(seldom_str)
                if report_path.split(".")[-1] == "xml":
                    runner = XMLTestRunner(output=fp)
                    runner.run(suits)
                else:
                    runner = HTMLTestRunner(stream=fp,
                                            title=self.title,
                                            description=self.description)
                    runner.run(suits,
                               rerun=self.rerun,
                               save_last_run=self.save_last_run)

            log.info("generated html file: file:///{}".format(report_path))
            webbrowser.open_new("file:///{}".format(report_path))
        else:
            runner = unittest.TextTestRunner(verbosity=2)
            log.info(
                "Running the test in debug mode without generating an HTML report!")
            log.info(seldom_str)
            runner.run(suits)
Example No. 35
    def _run_test_case(self, suits):
        """
        run test case
        """
        if self.debug is False:
            for filename in os.listdir(os.getcwd()):
                if filename == "reports":
                    break
            else:
                os.mkdir(os.path.join(os.getcwd(), "reports"))

            if (self.report is None) and (BrowserConfig.REPORT_PATH
                                          is not None):
                report_path = BrowserConfig.REPORT_PATH
            else:
                report_path = BrowserConfig.REPORT_PATH = os.path.join(
                    os.getcwd(), "reports", self.report)

            with (open(report_path, 'wb')) as fp:
                if report_path.split(".")[-1] == "xml":
                    runner = XMLTestRunner(output=fp)
                    runner.run(suits)
                else:
                    runner = HTMLTestRunner(stream=fp,
                                            title=self.title,
                                            description=self.description)
                    runner.run(suits,
                               rerun=self.rerun,
                               save_last_run=self.save_last_run)

            log.printf("generated html file: file:///{}".format(report_path))
            log.printf("generated log file: file:///{}".format(
                BrowserConfig.LOG_PATH))
            webbrowser.open_new("file:///{}".format(report_path))
        else:
            runner = unittest.TextTestRunner(verbosity=2)
            runner.run(suits)
            log.printf(
                "Running the test in debug mode without generating an HTML report!")
Example No. 36
def test_main(args=[]):
    test_classes = [
        PingTest, LookupUsersTest, NamespaceTest, BlockChains, Prices,
        NamesOwnedTest, NameHistoryTest, AuthInternal, BlockChainsInternal,
        Zonefiles, WalletInternal, NodeInternal
    ]

    test_map = {}
    for t in test_classes:
        test_map[t.__name__] = t

    test_runner = unittest.TextTestRunner(verbosity=2).run

    if "--xunit-path" in args:
        ainx = args.index("--xunit-path")
        del args[ainx]
        from xmlrunner import XMLTestRunner
        test_runner = XMLTestRunner(output=args[ainx]).run
        del args[ainx]

    if len(args) == 1 and args[0] == "--list":
        print("Tests supported: ")
        for testname in test_map.keys():
            print(testname)
        return

    if "--api_password" in args:
        ainx = args.index("--api_password")
        del args[ainx]
        global API_PASSWORD
        API_PASSWORD = args[ainx]
        del args[ainx]

    if len(args) == 0 or args[0] == "--all":
        args = [testname for testname in test_map.keys()]

    test_suite = unittest.TestSuite()
    for test_name in args:
        test_suite.addTest(unittest.TestLoader().loadTestsFromTestCase(
            test_map[test_name]))
    result = test_runner(test_suite)
    if result:  # test_support.run_unittest returns None
        if result.wasSuccessful():
            sys.exit(0)
        else:
            sys.exit(1)
Example No. 37
def main(argv):
    """
    Runs the test files through the xml runner
    :param argv: List of command line arguments
    """
    if len(argv) != 2:
        raise ValueError("Usage: testrunner <path-to-test-file>")

    pathname = argv[1]
    if not os.path.exists(pathname):
        raise ValueError("Test file not found '{}'".format(pathname))
    if not os.path.isfile(pathname):
        raise ValueError("Test path '{}' is not a file".format(pathname))

    # Add the directory of the test to the Python path
    dirname = os.path.dirname(pathname)
    # if the directory ends with 'tests' add the parent directory as well
    # this is used in the external project PyStoG
    sys.path.insert(0, dirname)
    if os.path.split(dirname)[-1] == 'tests':
        sys.path.insert(1, os.path.split(dirname)[0])

    # Load the test and copy over any module variables so that we have
    # the same environment defined here
    test_module = imp.load_source(module_name(pathname), pathname)
    test_module_globals = dir(test_module)
    this_globals = globals()
    for key in test_module_globals:
        this_globals[key] = getattr(test_module, key)

    # create runner & execute
    runner = XMLTestRunner(output='.',
                           outsuffix='',
                           resultclass=result_class(pathname))
    unittest.main(
        module=test_module,
        # We've processed the test source so don't let unittest try to reparse it
        # This forces it to load the tests from the supplied module
        argv=(argv[0], ),
        testRunner=runner,
        # these make sure that some options that are not applicable
        # remain hidden from the help menu.
        failfast=False,
        buffer=False,
        catchbreak=False)
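imp.load_source, used above, is deprecated and was removed in Python 3.12; on current Python the same dynamic import can be done with importlib. A sketch that keeps the example's module_name() helper as an assumption:

import importlib.util

def load_test_module(pathname):
    # Python 3 equivalent of imp.load_source(module_name(pathname), pathname);
    # module_name() is the helper from the surrounding example, assumed to exist.
    spec = importlib.util.spec_from_file_location(module_name(pathname), pathname)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module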
Example No. 38
class testResult(object):
    def __init__(self, testClass):
        self.cTest = testClass
        self.outLogFile = self.cTest.get_out_path('test_result.log')

    def runTest(self, isSilent=False):
        '''
        if isSilent is False then sys.stderr will be used as output of result.
        Otherwise output will be saved to the file.
        Read documentation of TextTestRunner for more information.
        '''
        paramDic = {'stream': None, 'verbosity': 2, 'output': 'test-reports'}
        if isSilent:
           paramDic['stream'] = open(self.outLogFile, 'w')
        suite = unittest.TestLoader().loadTestsFromTestCase(self.cTest)
        self.result = XMLTestRunner(**paramDic).run(suite)
        if paramDic['stream']:
           paramDic['stream'].close()
        self.checkResult(isSilent)

    def checkResult(self, isSilent=False):
        '''
        Value of isSilent is processed conversely to the runTest method.
        if isSilent is False then output will be saved to the file.
        Otherwise output will be printed to the sys.stderr.
        '''
        if self.result.wasSuccessful():
           return

        myOutFile = sys.stderr
        if not isSilent:
           myOutFile = open(self.outLogFile, 'w')

        if self.result.errors:
           print('ERRORS:', sep='\n', end='\n', file=myOutFile)
        for r in self.result.errors:
           print(*r, sep='\n', end='\n', file=myOutFile)

        if self.result.failures:
           print('FAILURES:', sep='\n', end='\n', file=myOutFile)
        for r in self.result.failures:
           print(*r, sep='\n', end='\n', file=myOutFile)
        myOutFile.close()
Example No. 39
def main():
    '''
    Test main function entry
    '''
    # Read command line arguments
    opts = parse_options()
    host = opts.host
    is_create = opts.create_template
    report_format = opts.report_format
    token = opts.token
    path = os.getcwd()

    if is_create:
        create_example(path)
        print("Create template successfully")
        sys.exit()
    # Log file configuration
    log_dir = PATH(os.path.join(path, 'log'))
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    log_filename = str(time.strftime("%Y%m%d%H%M%S",
                                     time.localtime())) + '.log'
    log_path = PATH(os.path.join(log_dir, log_filename))
    log_init(PATH(log_path))
    # Use case, configuration import
    case_dir = PATH(os.path.join(path, 'case/yaml'))
    config_file = PATH(os.path.join(path, 'config/config.yaml'))
    if host and (host == 'test'):
        config_file = PATH(os.path.join(path, 'config/test_config.yaml'))
    testcase_id_list = loading_data(case_dir, config_file, token)
    script_dir = PATH(os.path.join(path, 'case/script'))
    script_file = PATH(os.path.join(script_dir, 'test_allcase.py'))
    create_script(script_file, testcase_id_list)
    # Generate test report
    suit = unittest.TestSuite()
    discover = unittest.defaultTestLoader.discover(script_dir,
                                                   pattern='test_*')
    for i in discover:
        suit.addTest(i)
    runner = HTMLTestRunner(output='reports')
    if report_format and (report_format == 'xml' or report_format == 'XML'):
        runner = XMLTestRunner(output='reports')
    runner.run(suit)
Example No. 40
def run_tests(argv, env, coverage, junit_xml):
    print('running tests...')
    if coverage:
        omissions = [os.path.join(root, '*') for root in VENDORED_ROOTS]
        # TODO: Drop the explicit pydevd omit once we move the subtree.
        omissions.append(os.path.join('ptvsd', 'pydevd', '*'))
        ver = 3 if sys.version_info < (3, ) else 2
        omissions.append(os.path.join('ptvsd', 'reraise{}.py'.format(ver)))
        args = [
            sys.executable,
            '-m',
            'coverage',
            'run',
            # We use --source instead of "--include ptvsd/*".
            '--source',
            'ptvsd',
            '--omit',
            ','.join(omissions),
            '-m',
            'unittest',
        ] + argv[1:]
        assert 'PYTHONPATH' not in env
        env['PYTHONPATH'] = os.pathsep.join(VENDORED_ROOTS)
        rc = subprocess.call(args, env=env)
        if rc != 0:
            print('...coverage failed!')
            sys.exit(rc)
        print('...done')
    elif junit_xml:
        from xmlrunner import XMLTestRunner  # noqa
        os.environ.update(env)
        verbosity = 1
        if '-v' in argv or '--verbose' in argv:
            verbosity = 2
        with open(junit_xml, 'wb') as output:
            unittest.main(
                testRunner=XMLTestRunner(output=output, verbosity=verbosity),
                module=None,
                argv=argv,
            )
    else:
        os.environ.update(env)
        unittest.main(module=None, argv=argv)
Example No. 41
def run_tests(test_reports_path=DEFAULT_TEST_LOCATION):
    """
    Runs the test suite

    Args:
        test_reports_path: Path to test reports

    Returns:
        True if the tests passed, false otherwise
    """
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for case in [
            DeviceInfoGeneratorTests, GuiUtilsTests, SystemPathTests,
            FileSystemUtilsTests
    ]:
        suite.addTests(loader.loadTestsFromTestCase(case))

    return XMLTestRunner(output=str(os.path.join(test_reports_path)),
                         stream=sys.stdout).run(suite).wasSuccessful()
Example No. 42
    suite.addTest(
        TC_3405_Consult_the_legal_notices_from_my_account_my_preferences_in_opt_in(
            "test"))
    suite.addTest(
        TC_3409_T015215_Active_tracking_from_my_account_set_opl_in_parameter(
            "test"))
    suite.addTest(
        TC_3396_T015183_consult_recommendation_implicite_profile_in_opt_in_mode_csa4_5(
            "test"))
    suite.addTest(
        TC_2982_T014407_change_the_audio_version_of_a_program_on_toolbox_ip_stream(
            "test"))
    suite.addTest(TC_10906_T016034_Consult_a_no_rented_paid_vod("test"))
    suite.addTest(
        TC_3221_T016765_When_record_is_in_progress_zap_to_another_program(
            "test"))
    suite.addTest(
        TC_3464_3468_T015841_sound_level_on_live_access_T015845_sound_level_on_live_zapping(
            "test"))
    suite.addTest(TC_3368_T014506_consult_prepaid_account("test"))
    suite.addTest(TC_9802_T999999_Auto_select_summary_option("test"))
    suite.addTest(TC_9798_T000000_Auto_Display_and_Use_Zapping_Banner("test"))

    runner = XMLTestRunner(createAndGetXmlDirPath())
    result = runner.run(suite)
    writeTsSummaryToFiles(result)
    if not result.wasSuccessful():
        exit(1)

    exit()
Example No. 43
        return runner.run(self_test_suite).wasSuccessful()

    if not self_valid():
        print("Check OPI format test script failed own tests. Aborting")
        sys.exit(1)

    return_values = []
    xml_parser = etree.XMLParser(remove_blank_text=True)

    # Add test suite a dynamic number of times with an argument.
    # unittest's test loader is unable to take arguments to test classes by default so have
    # to use the getTestCaseNames() syntax and explicitly add the argument ourselves.
    for filename in file_iterator(root_dir, single_file):

        print("Testing '{}'".format(filename))

        suite = unittest.TestSuite()

        try:
            root = etree.parse(filename, xml_parser)
        except LxmlError as e:
            print("XML failed to parse {}".format(e))
            return_values.append(False)
            continue

        suite.addTests([CheckOpiFormat(test, root) for test in loader.getTestCaseNames(CheckOpiFormat)])
        runner = XMLTestRunner(output=os.path.join(logs_dir, filename), stream=sys.stdout)
        return_values.append(runner.run(suite).wasSuccessful())

    sys.exit(False in return_values)
Example No. 44
 def self_valid():
     self_test_suite = unittest.TestSuite()
     self_test_suite.addTests(loader.loadTestsFromTestCase(TestCheckOpiFormatMethods))
     runner = XMLTestRunner(output=os.path.join(logs_dir, "check_opi_format"), stream=sys.stdout)
     return runner.run(self_test_suite).wasSuccessful()
Example No. 45
#!/usr/bin/env python

import sys
import unittest
from xmlrunner import XMLTestRunner


if __name__ == "__main__":
    py_version = "v{0}{1}".format(*sys.version_info[:2])
    test_suite = unittest.TestLoader().discover("djcopybook")
    runner = XMLTestRunner(verbosity=2, output="jenkins_reports", outsuffix=py_version)
    result = runner.run(test_suite)

    sys.exit(not result.wasSuccessful())
Example No. 46
import optparse
import os
import sys
from xmlrunner import XMLTestRunner

try:
    from unittest2 import TestLoader, TextTestRunner
except ImportError:
    from unittest import TestLoader, TextTestRunner

base_path = os.path.dirname(__file__)

parser = optparse.OptionParser()
parser.add_option("-x", "--xml-prefix",
                  dest="xml_prefix",
                  help="generate XML test report in given directory",
                  default=None)
(options, args) = parser.parse_args()

loader = TestLoader()
tests  = loader.discover(os.path.join(base_path, 'cvmfs/test'),
                         pattern='*_test.py')

runner = None
if options.xml_prefix:
  runner = XMLTestRunner(output=options.xml_prefix, verbosity=2)
else:
  runner = TextTestRunner(verbosity=2)

runner.run(tests)
Example No. 47
class TestRunnerBase(object):
    '''test runner base '''
    def __init__(self, context=None):
        self.tclist = []
        self.runner = None
        self.context = context if context else TestContext()
        self.test_options = None
        self.log_handler = None
        self.test_result = None
        self.run_time = None
        self.option_list = [
            make_option("-f", "--manifest", dest="manifest",
                        help="The test list file"),
            make_option("-x", "--xunit", dest="xunit",
                        help="Output result path of in xUnit XML format"),
            make_option("-l", "--log-dir", dest="logdir",
                        help="Set log dir."),
            make_option("-a", "--tag-expression", dest="tag",
                        help="Set tag expression to filter test cases."),
            make_option("-T", "--timeout", dest="timeout", default=60,
                        help="Set timeout for each test case."),
            make_option("-e", "--tests", dest="tests", action="append",
                        help="Run tests by dot separated module path")
        ]

    def __del__(self):
        """
        Because unittest.TestCase is a class object, it will exist as long as the python virtual machine process.
        So tc can't be released if we don't release them explicitly.
        """
        if hasattr(unittest.TestCase, "tc"):
            delattr(unittest.TestCase, "tc")

    @staticmethod
    def __get_tc_from_manifest(fname):
        '''get tc list from manifest format '''
        with open(fname, "r") as f:
            tclist = [n.strip() for n in f.readlines() \
                                if n.strip() and not n.strip().startswith('#')]
        return tclist

    @staticmethod
    def _get_log_dir(logdir):
        '''get the log directory'''
        if os.path.exists(logdir):
            shutil.rmtree(logdir)
        os.makedirs(logdir)
        return logdir

    def get_options(self, default=False):
        '''handle testrunner options'''
        parser = OptionParser(option_list=self.option_list, \
                                usage="usage: %prog [options]")
        if default:
            return parser.parse_args(args=[])[0]
        return parser.parse_args()[0]

    def configure(self, options):
        '''configure before testing'''
        self.test_options = options
        if options.xunit:
            try:
                from xmlrunner import XMLTestRunner
            except ImportError:
                raise Exception("unittest-xml-reporting not installed")
            self.runner = XMLTestRunner(stream=sys.stderr, \
                                        verbosity=2, output=options.xunit)
        else:
            self.runner = unittest.TextTestRunner(stream=sys.stderr, \
                                                  verbosity=2)

        if options.manifest:
            fbname, fext = os.path.splitext(os.path.basename(options.manifest))
            assert fbname == "manifest" or fext == ".manifest", \
                  "Please specify file name like xxx.manifest or manifest.xxx"
            self.tclist = self.__get_tc_from_manifest(options.manifest)

        if options.tests:
            tcs = [t[0:-3] if t.endswith(".py") else t[0:-1] \
                               if t.endswith("/") else t for t in options.tests]
            self.tclist.extend([tc.replace("/", ".") for tc in tcs])

        if options.logdir:
            logdir = self._get_log_dir(options.logdir)
            self.log_handler = LogHandler(logdir)

        try:
            self.context.def_timeout = int(options.timeout)
        except ValueError:
            print "timeout need an integer value"
            raise

    def result(self):
        '''output test result '''
        print "output test result..."

    def loadtest(self, names=None):
        '''load test suite'''
        if not names:
            names = self.tclist
        testloader = unittest.TestLoader()
        tclist = []
        for name in names:
            tset = testloader.loadTestsFromName(name)
            if tset.countTestCases() > 0:
                tclist.append(tset)
            elif tset._tests == []:
                tclist.append(testloader.discover(name, "[!_]*.py", os.path.curdir))
        return testloader.suiteClass(tclist)

    def filtertest(self, testsuite):
        '''filter test set'''
        if self.test_options.tag:
            return filter_tagexp(testsuite, self.test_options.tag)
        return testsuite

    def runtest(self, testsuite):
        '''run test suite'''
        starttime = time.time()
        self.test_result = self.runner.run(testsuite)
        self.run_time = time.time() - starttime

    def start(self, testsuite):
        '''start testing'''
        if self.log_handler:
            self.log_handler.start()
        set_timeout(testsuite, self.context.def_timeout)
        setattr(unittest.TestCase, "tc", self.context)
        self.runtest(testsuite)
        self.result()
        if self.log_handler:
            self.log_handler.end()
Example No. 48
import sys
import unittest

from os.path import dirname
from xmlrunner import XMLTestRunner

from mycroft.configuration import ConfigurationManager

__author__ = 'seanfitz, jdorleans'
if __name__ == "__main__":
    fail_on_error = "--fail-on-error" in sys.argv
    ConfigurationManager.load_local(['mycroft.ini'])

    tests = unittest.TestLoader().discover(dirname(__file__), "*.py")
    runner = XMLTestRunner("./build/report/tests")
    result = runner.run(tests)

    if fail_on_error and len(result.failures + result.errors) > 0:
        sys.exit(1)
Example No. 49
  # setting up logging
  log_opt = args['logging']
  logger = logging.getLogger(globals.LOGGER_NAME)
  if log_opt == 'INFO':
    logger.level = logging.INFO
  elif log_opt == 'DEBUG':
    logger.level = logging.DEBUG

  test_context = TestContext(
      ENGINE_DIRECTORY, DATA_DIRECTORY,
      args['eventserver_ip'], int(args['eventserver_port']))

  tests_dict = get_tests(test_context)
  test_names = args['tests']
  tests = []
  if test_names is not None:
    tests = [t for name, t in tests_dict.items() if name in test_names]
  else:
    tests = tests_dict.values()

  # Actual tests execution
  event_server_process = srun_bg('pio eventserver --ip {} --port {}'
      .format(test_context.es_ip, test_context.es_port))
  time.sleep(5)
  result = XMLTestRunner(verbosity=2, output='test-reports').run(
                unittest.TestSuite(tests))
  event_server_process.kill()

  if not result.wasSuccessful():
    sys.exit(1)
Example No. 50
#!/usr/bin/env python
"Defines the unit tests for the application."
from  xmlrunner import XMLTestRunner
import testsuite
import sys


if __name__ == '__main__': 
    runner = XMLTestRunner(sys.stdout)
    runner.run(testsuite.suite)