Example #1
    def _run_and_parse_test(self, case):
        # Run `case` under a fresh JUnitXmlResult and return the XML it
        # emitted as a parsed DOM document, ready for assertions.
        output = StringIO()
        result = junitxml.JUnitXmlResult(output)
        result.startTestRun()
        case.run(result)
        result.stopTestRun()
        return xml.dom.minidom.parseString(output.getvalue())
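A hedged sketch of how such a helper might be consumed by a sibling test method; SimplePass is a hypothetical one-test TestCase, and <testsuite> is the standard JUnit XML root element:

    def test_pass_emits_one_testcase(self):
        doc = self._run_and_parse_test(SimplePass('test_it'))
        # pyjunitxml wraps the whole run in a single <testsuite> element.
        suite = doc.documentElement
        self.assertEqual('testsuite', suite.tagName)
        self.assertEqual('1', suite.getAttribute('tests'))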
Example #2
    def assertOutput(self, template, kind, kwargs=None):
        """Assert the expected output from a run for a given test.

        :param template: A string where common strings have been replaced by a
            keyword so we don't run into pep8 warnings for long lines.

        :param kind: A string used to select the kind of test.

        :param kwargs: A dict with more keywords for the template. This allows
            some tests to add more keywords when they are test specific.
        """
        if kwargs is None:
            kwargs = dict()
        out = StringIO()
        res = junitxml.JUnitXmlResult(out)
        # We don't care about timing here, so we always return 0.0, which
        # simplifies matching the expected result.
        res._now = lambda: 0.0
        res._duration = lambda f: 0.0
        test = get_case(kind)
        test.run(res)
        # due to the nature of JUnit XML output, nothing will be written to
        # the stream until stopTestRun() is called.
        res.stopTestRun()
        # To keep the templates readable, we pre-format some known values.
        kwargs.update(dict(classname='%s.%s' % (test.__class__.__module__,
                                                test.__class__.__name__),
                           name=test._testMethodName))
        expected = template.format(**kwargs)
        self.assertEqual(expected, res._stream.getvalue())
Example #3
    def assertOutput(self, template, kind, kwargs=None):
        """Assert the expected output from a run for a given test.

        :param template: A string where common strings have been replaced by a
            keyword so we don't run into pep8 warnings for long lines.

        :param kind: A string used to select the kind of test.

        :param kwargs: A dict with more keywords for the template. This allows
            some tests to add more keywords when they are test specific.
        """
        test = tests.get_case(kind)
        # Get the subunit output (what the subprocess produces).
        stream = StringIO()
        res = subunit.TestProtocolClient(stream)
        test.run(res)
        # Feed it back in (what the controlling process consumes).
        receiver = subunit.ProtocolTestCase(StringIO(stream.getvalue()))
        out = StringIO()
        res = junitxml.JUnitXmlResult(out)
        # We don't care about timing here, so we always return 0.0, which
        # simplifies matching the expected result.
        res._now = lambda: 0.0
        res._duration = lambda f: 0.0
        expected = tests.expand_template_for_test(template, test, kwargs)
        res.startTestRun()
        receiver.run(res)
        # due to the nature of JUnit XML output, nothing will be written to
        # the stream until stopTestRun() is called.
        res.stopTestRun()
        self.assertEqual(expected, out.getvalue())
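Compared with Example #2, this variant first round-trips the test through a subunit stream (TestProtocolClient to serialize, ProtocolTestCase to replay), so the JUnit XML is built from what a controlling process would actually receive from a worker, not from an in-process result object.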
Example #4
def run_tests_junit():
    """Run the test suite, emitting JUnit-compatible XML on stdout."""
    import junitxml
    import sys
    import unittest
    result = junitxml.JUnitXmlResult(sys.stdout)
    result.startTestRun()
    suite = unittest.defaultTestLoader.discover("flaskapp",
                                                top_level_dir="flaskapp")
    suite.run(result)
    result.stopTestRun()
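Note that discover() returns a plain TestSuite which is run directly against the result object, with no TestRunner in between; that is why startTestRun() and stopTestRun() have to be called by hand here, as in most of these examples.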
Example #5
    def run_suite(self, suite, **kwargs):
        if os.environ.get('HUMFREY_JUNIT_TEST'):
            import junitxml
            report_filename = os.path.join(
                os.path.dirname(__file__), '..', 'xmlresults.xml')
            with open(report_filename, 'w') as report:
                result = junitxml.JUnitXmlResult(report)
                result.startTestRun()
                suite.run(result)
                result.stopTestRun()
            return result
        else:
            return super(HumfreyTestSuiteRunner, self).run_suite(
                suite, **kwargs)
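Usage is driven purely by the environment here: export HUMFREY_JUNIT_TEST before invoking the (presumably Django) test command and the runner writes xmlresults.xml one directory above this module instead of using the default console reporting.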
Example #6
    def setUp(self):
        self.output = StringIO()
        self.result = junitxml.JUnitXmlResult(self.output)
Example #7
    def test_with_stream(self):
        result = junitxml.JUnitXmlResult(StringIO())
Example #8
    suite.addTest(TestSubmitDataset.getTestSuite(select=select))
    suite.addTest(TestSubmitDatasetHandler.getTestSuite(select=select))
    suite.addTest(TestDirectoryListingHandler.getTestSuite(select=select))
    suite.addTest(TestMetadataMerging.getTestSuite(select=select))
    suite.addTest(TestGetDatasetMetadataHandler.getTestSuite(select=select))
    suite.addTest(TestHttpSession.getTestSuite(select=select))
    return suite


from MiscLib import TestUtils
import junitxml

if __name__ == "__main__":
    print "============================================================"
    print "This test suite needs to run under a Linux operating system"
    print "Edit TestConfig.py to specify hostname and other parameters"
    print "Create test accounts on target system to match TestConfig.py"
    print "============================================================"
    TestConfig.setDatasetsBaseDir(".")

    if len(sys.argv) >= 2 and sys.argv[1] == "xml":
        with open('xmlresults.xml', 'w') as report:
            result = junitxml.JUnitXmlResult(report)
            result.startTestRun()
            getTestSuite().run(result)
            result.stopTestRun()
    else:
        TestUtils.runTests("TestAll", getTestSuite, sys.argv)

# End.
Example #9
def make_junit_result(stream):
    """Make a result that emits JUnit-compatible XML results."""
    return junitxml.JUnitXmlResult(stream)
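A minimal end-to-end sketch of driving this factory, with make_junit_result from above in scope and assuming junitxml is importable; PassingTest is a hypothetical case added only for the demonstration:

import unittest

try:
    from StringIO import StringIO  # Python 2, as in the examples above
except ImportError:
    from io import StringIO  # Python 3


class PassingTest(unittest.TestCase):
    # Hypothetical test case, used only to exercise the result object.
    def test_nothing(self):
        pass


stream = StringIO()
result = make_junit_result(stream)
result.startTestRun()
unittest.TestLoader().loadTestsFromTestCase(PassingTest).run(result)
# As the other examples note, the XML is only written on stopTestRun().
result.stopTestRun()
print(stream.getvalue())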
Example #10
    def test_generate(self):
        # Run this nearly a million times and verify all output is valid.
        for i in range(1, 1000000):
            a_letter = main.LETTERS(0)
            self.assertTrue(a_letter.generate() in range(1, 27),
                            msg="%d out of range" % a_letter.current_value)


if __name__ == '__main__':

    # Test Suites
    numberTests = unittest.TestLoader().loadTestsFromTestCase(TestNumberOutput)
    letterTests = unittest.TestLoader().loadTestsFromTestCase(TestLetterOutput)
    # Combine Test Suites
    allTests = unittest.TestSuite([numberTests, letterTests])

    fp = open('test_results.xml', 'wb')
    result = junitxml.JUnitXmlResult(fp)

    # Execute
    result.startTestRun()
    allTests.run(result)
    result.stopTestRun()

    # Print Result
    if result.wasSuccessful():
        print "Test Passed"
    else:
        print "Test Failure"
Example #11
def runtests(test_regexps,
             results_directory,
             out,
             test_dir='.',
             collect_only=False,
             browser_factory=None,
             report_format='console',
             shared_directory=None,
             screenshots_on=False,
             concurrency_num=1,
             failfast=False,
             debug=False,
             extended=False,
             includes=None,
             excludes=None):
    if not os.path.isdir(test_dir):
        raise RuntimeError('Specified directory %r does not exist' %
                           (test_dir, ))
    if browser_factory is None and collect_only is False:
        raise RuntimeError('A browser must be specified')
    shared_directory = find_shared_directory(test_dir, shared_directory)
    config.shared_directory = shared_directory
    if shared_directory is not None:
        sys.path.append(shared_directory)

    loader = loaders.SSTestLoader(results_directory, browser_factory,
                                  screenshots_on, debug, extended)
    alltests = loader.suiteClass()
    alltests.addTests(loader.discoverTestsFromTree(test_dir))
    alltests = filters.include_regexps(test_regexps, alltests)
    alltests = filters.exclude_regexps(excludes, alltests)

    if not alltests.countTestCases():
        # FIXME: Really needed ? Can't we just rely on the number of tests run
        # ? -- vila 2013-06-04
        raise RuntimeError('Did not find any tests')

    if collect_only:
        for t in testtools.testsuite.iterate_tests(alltests):
            out.write(t.id() + '\n')
        return 0

    txt_res = results.TextTestResult(out, failfast=failfast, verbosity=2)
    if report_format == 'xml':
        results_file = os.path.join(results_directory, 'results.xml')
        xml_stream = open(results_file, 'wb')
        result = testtools.testresult.MultiTestResult(
            txt_res, junitxml.JUnitXmlResult(xml_stream))
        result.failfast = failfast
    else:
        result = txt_res

    if concurrency_num == 1:
        suite = alltests
    else:
        suite = testtools.ConcurrentTestSuite(
            alltests, concurrency.fork_for_tests(concurrency_num))

    result.startTestRun()
    try:
        suite.run(result)
    except KeyboardInterrupt:
        out.write('Test run interrupted\n')
    result.stopTestRun()

    return len(result.failures) + len(result.errors)
Example #12
def runtests(test_regexps,
             test_dir='.',
             collect_only=False,
             browser_factory=None,
             report_format='console',
             shared_directory=None,
             screenshots_on=False,
             failfast=False,
             debug=False,
             extended=False,
             includes=None,
             excludes=None):

    config.results_directory = os.path.abspath('results')
    actions._make_results_dir()

    if test_dir == 'selftests':
        # XXXX horrible hardcoding
        # selftests should be a command instead
        package_dir = os.path.dirname(__file__)
        os.chdir(os.path.dirname(package_dir))
        test_dir = os.path.join('.', 'sst', 'selftests')
    else:
        if not os.path.isdir(test_dir):
            msg = 'Specified directory %r does not exist' % test_dir
            print msg
            sys.exit(1)
    shared_directory = find_shared_directory(test_dir, shared_directory)
    config.shared_directory = shared_directory
    sys.path.append(shared_directory)

    if browser_factory is None:
        # TODO: We could raise an error instead as providing a default value
        # makes little sense here -- vila 2013-04-11
        browser_factory = browsers.FirefoxFactory()

    test_loader = loader.TestLoader(browser_factory, screenshots_on, debug,
                                    extended)
    alltests = test_loader.suiteClass()
    alltests.addTests(
        test_loader.discoverTests(test_dir,
                                  file_loader_class=loader.ScriptLoader,
                                  dir_loader_class=loader.ScriptDirLoader))

    alltests = filters.filter_by_regexps(test_regexps, alltests)
    alltests = filters.exclude_regexps(excludes, alltests)

    print ''
    print '  %s test cases loaded\n' % alltests.countTestCases()
    print '--------------------------------------------------------------'

    if not alltests.countTestCases():
        print 'Error: Did not find any tests'
        sys.exit(1)

    if collect_only:
        print 'Collect-Only Enabled, Not Running Tests...\n'
        print 'Tests Collected:'
        print '-' * 16
        for t in testtools.testsuite.iterate_tests(alltests):
            print t.id()
        return

    text_result = result.TextTestResult(sys.stdout,
                                        failfast=failfast,
                                        verbosity=2)
    if report_format == 'xml':
        results_file = os.path.join(config.results_directory, 'results.xml')
        xml_stream = open(results_file, 'wb')
        res = testtools.testresult.MultiTestResult(
            text_result,
            junitxml.JUnitXmlResult(xml_stream),
        )
        res.failfast = failfast
    else:
        res = text_result

    res.startTestRun()
    try:
        alltests.run(res)
    except KeyboardInterrupt:
        print >> sys.stderr, 'Test run interrupted'
    finally:
        # XXX should warn on cases that were specified but not found
        pass
    res.stopTestRun()

    return len(res.failures) + len(res.errors)