def test_execution_with_dryrun(self):
    """
    Test that the actual tests are not executed in dryrun mode.

    Runs the same suite twice: once with dryrun=True (the mock test
    bodies must not execute) and once normally (they must execute).
    """
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'mocktests')
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['dryrun_test_sample2.py'],
        top_level_dir=test_dir,
        dryrun=True)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 3)
    # This variable should be 0 even after the tests are run in the dryrun
    # mode since we do not expect to run the test code.
    self.assertEqual(tinctest.test.dryrun.tests_run_count, 0)
    # Without dryrun it should have set the variable.
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['dryrun_test_sample2.py'],
        top_level_dir=test_dir)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        self.assertEqual(tinc_test_result.testsRun, 3)
    # Tests should have been run and tests_run_count should be incremented.
    self.assertEqual(tinctest.test.dryrun.tests_run_count, 8)
def test_sql_test_case_with_discovery_queries(self):
    """
    Test discovery-time query filtering for SQL test cases: first by
    method-name pattern, then with an additional metadata (tags) clause.
    """
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'sql_pattern')
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['sql_pattern.py'],
        top_level_dir=None,
        query_handler=TINCDiscoveryQueryHandler(['method=test_functional_*']))
    # Removed dead locals: test_case/test_result pre-initializations were
    # never read before reassignment.
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_runner = TINCTestRunner(stream=buffer, descriptions=True, verbosity=1)
        test_result = tinc_test_runner.run(tinc_test_suite)
        self.assertEqual(test_result.testsRun, 6)
        self.assertEqual(len(test_result.skipped), 6)
    # Queries using metadata from sql files.
    tinc_test_loader = tinctest.TINCTestLoader()
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['sql_pattern.py'],
        top_level_dir=None,
        query_handler=TINCDiscoveryQueryHandler(['method=test_functional_* and tags != long']))
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_runner = TINCTestRunner(stream=buffer, descriptions=True, verbosity=1)
        test_result = tinc_test_runner.run(tinc_test_suite)
        self.assertEqual(test_result.testsRun, 3)
        self.assertEqual(len(test_result.skipped), 3)
def test_some_combination(self):
    """
    Combine success/failure/error/skip in one suite and verify both the
    result counters and the formatted output of TINCTestResultSet.
    """
    suite = tinctest.TINCTestSuite()
    suite.addTest(MockTINCTestCaseForResults('test_success'))
    suite.addTest(MockTINCTestCaseForResults('test_failure'))
    suite.addTest(MockTINCTestCaseForResults('test_error'))
    suite.addTest(MockTINCTestCaseForResults('test_skip'))
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = TINCTestResultSet(buffer, True, 1)
        suite.run(tinc_test_result)
        # Read the stream before closing() closes the underlying StringIO.
        text = buffer.getvalue()
    self.assertEqual(tinc_test_result.testsRun, 4)
    self.assertEqual(len(tinc_test_result.failures), 1)
    self.assertEqual(len(tinc_test_result.errors), 1)
    self.assertEqual(len(tinc_test_result.skipped), 1)
    # Raw strings: '\.' in a non-raw literal is an invalid escape sequence
    # (SyntaxWarning on modern Python); the regex value is unchanged.
    self.assertRegexpMatches(
        text, r'MockTINCTestCaseForResults.test_success \.\.\. .* \.\.\. ok')
    self.assertRegexpMatches(
        text, r'MockTINCTestCaseForResults.test_failure \.\.\. .* \.\.\. FAIL')
    self.assertRegexpMatches(
        text, r'MockTINCTestCaseForResults.test_error \.\.\. .* \.\.\. ERROR')
    self.assertRegexpMatches(
        text, r'MockTINCTestCaseForResults.test_skip \.\.\. .* \.\.\. skipped .*')
def test_restart_on_failure(self):
    """
    Verify that a failing MPP test gathers logs (and presumably restarts
    the cluster) on failure.
    """
    test_loader = tinctest.TINCTestLoader()
    test_suite = test_loader.loadTestsFromName(
        'mpp.models.regress.mpp_tc.regress_mpp_test_case.MockMPPTestCase.test_restart_on_failure'
    )
    self.assertIsNotNone(test_suite)
    # Bug fix: assertTrue(len(...), 1) treated 1 as the failure message and
    # passed for any non-empty suite; assertEqual is what was intended.
    self.assertEqual(len(test_suite._tests), 1)
    for test in test_suite._tests:
        test.__class__.__unittest_skip__ = False
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_runner = tinctest.TINCTestRunner(stream=buffer, descriptions=True, verbosity=1)
        test_result = tinc_test_runner.run(test_suite)
        self.assertEqual(test_result.testsRun, 1)
        self.assertEqual(len(test_result.errors), 0)
        self.assertEqual(len(test_result.skipped), 0)
        self.assertEqual(len(test_result.failures), 1)
    # TODO may be add a check later on to see if we actually restart the cluster.
    # 'test' is the last (only) test left bound by the loop above.
    expected_log_file = os.path.join(MockMPPTestCase.get_out_dir(),
                                     test._testMethodName + '.logs')
    self.assertTrue(os.path.exists(expected_log_file))
    self.assertTrue(os.path.getsize(expected_log_file) > 0)
def test_failure(self):
    """A failing concurrency test must be recorded as exactly one failure."""
    case = MockConcurrencyTestCase('test_failure')
    case.__class__.__unittest_skip__ = False
    with closing(_WritelnDecorator(StringIO())) as stream:
        result = TINCTextTestResult(stream, True, 1)
        case.run(result)
        self.assertEqual(len(result.failures), 1)
def test_skipped_tests_with_dryrun(self):
    """
    Test that dryrun reports tests with skip metadata without running
    setup class / setup / teardownclass / teardown fixtures.
    """
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'mocktests')
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['dryrun_test_sample2.py'],
        top_level_dir=test_dir,
        dryrun=True)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 3)
    # This variable should be 0 even after the tests are run in the dryrun
    # mode since we do not expect to run the test code.
    self.assertEqual(tinctest.test.dryrun.tests_run_count, 0)
    # test03 in the result should be skipped correctly.
    self.assertEqual(len(tinc_test_result.skipped), 1)
    self.assertEqual(tinc_test_result.skipped[0][1], "just skipping")
def test_skipped_tests_with_dryrun(self):
    """
    Test that dryrun reports tests with skip metadata without running
    setup class / setup / teardownclass / teardown fixtures.
    """
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'mocktests')
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['dryrun_test_sample2.py'],
        top_level_dir=test_dir,
        dryrun=True)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 3)
    # This variable should be 0 even after the tests are run in the dryrun
    # mode since we do not expect to run the test code.
    self.assertEqual(tinctest.test.dryrun.tests_run_count, 0)
    # test03 in the result should be skipped correctly.
    self.assertEqual(len(tinc_test_result.skipped), 1)
    self.assertEqual(tinc_test_result.skipped[0][1], "just skipping")
def test_module_import_failures_with_dryrun(self):
    """
    Test that module import failures are reported correctly in dryrun.
    """
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'mocktests')
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['dryrun_test_invalid_import.py'],
        top_level_dir=test_dir,
        dryrun=True)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 1)
    # This variable should be 0 even after the tests are run in the dryrun
    # mode since we do not expect to run the test code.
    self.assertEqual(tinctest.test.dryrun.tests_run_count, 0)
    self.assertEqual(len(tinc_test_result.errors), 1)
    # errors is a list of (test, traceback) tuples.
    test_instance = tinc_test_result.errors[0][0]
    traceback_msg = tinc_test_result.errors[0][1]
    self.assertEqual(test_instance.__class__.__name__, "TINCModuleImportFailure")
    self.assertTrue("Failed to import test module" in traceback_msg)
def test_module_import_failures_with_dryrun(self):
    """
    Test that module import failures are reported correctly in dryrun.
    """
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'mocktests')
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['dryrun_test_invalid_import.py'],
        top_level_dir=test_dir,
        dryrun=True)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 1)
    # This variable should be 0 even after the tests are run in the dryrun
    # mode since we do not expect to run the test code.
    self.assertEqual(tinctest.test.dryrun.tests_run_count, 0)
    self.assertEqual(len(tinc_test_result.errors), 1)
    # errors is a list of (test, traceback) tuples.
    test_instance = tinc_test_result.errors[0][0]
    traceback_msg = tinc_test_result.errors[0][1]
    self.assertEqual(test_instance.__class__.__name__, "TINCModuleImportFailure")
    self.assertTrue("Failed to import test module" in traceback_msg)
def test_run_test_with_data_provider_complicated(self):
    """A complicated data provider expands one test method into six runs."""
    tinc_test_loader = tinctest.TINCTestLoader()
    tinc_test_suite = tinc_test_loader.loadTestsFromName(
        'tinctest.test.test_data_provider.MockTINCTestCaseWithDataProviderComplicated.test_with_data_provider_complicated')
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # Fixed stale comment ("two tests") and the deprecated assertEquals:
        # this should have run six tests.
        self.assertEqual(tinc_test_result.testsRun, 6)
def test_run_test_with_data_provider_no_expand(self):
    """With expand=False a data-provider test is not multiplied out."""
    tinc_test_loader = tinctest.TINCTestLoader()
    tinc_test_suite = tinc_test_loader.loadTestsFromName(
        'tinctest.test.test_data_provider.MockTINCTestCaseWithDataProvider.test_with_data_provider',
        expand=False)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # This should have run one test, since expand is False.
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 1)
def test_runner(self):
    """Run the mock concurrency test case through TINCTestRunner."""
    test_loader = tinctest.TINCTestLoader()
    tinc_test_suite = test_loader.loadTestsFromName(
        'tinctest.models.concurrency.test.test_concurrency_test_case.MockConcurrencyTestCase')
    for test in tinc_test_suite._tests:
        # Idiom fix: 'x not in y' instead of 'not x in y'.
        if 'test_skip' not in test.name:
            test.__class__.__unittest_skip__ = False
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_runner = TINCTestRunner(stream=buffer, descriptions=True, verbosity=1)
        tinc_test_runner.run(tinc_test_suite)
def test_run_test_with_data_provider_no_expand(self):
    """With expand=False a data-provider test is not multiplied out."""
    tinc_test_loader = tinctest.TINCTestLoader()
    tinc_test_suite = tinc_test_loader.loadTestsFromName(
        'tinctest.test.test_data_provider.MockTINCTestCaseWithDataProvider.test_with_data_provider',
        expand=False)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # This should have run one test, since expand is False.
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 1)
def test_run_test_with_data_provider_complicated(self):
    """A complicated data provider expands one test method into six runs."""
    tinc_test_loader = tinctest.TINCTestLoader()
    tinc_test_suite = tinc_test_loader.loadTestsFromName(
        'tinctest.test.test_data_provider.MockTINCTestCaseWithDataProviderComplicated.test_with_data_provider_complicated'
    )
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # Fixed stale comment ("two tests") and the deprecated assertEquals:
        # this should have run six tests.
        self.assertEqual(tinc_test_result.testsRun, 6)
def test_run_sql_test_optimizer_minidump_on_failure2(self):
    """
    Test whether we gather minidumps on failures when the test is executed
    with optimizer_mode both.  (Fixed docstring typo 'exeucted'.)
    """
    test_loader = tinctest.TINCTestLoader()
    test_suite = test_loader.loadTestsFromName('mpp.models.regress.sql_related.regress_sql_test_case.' + \
                                               'regress_sql_test_case.' + \
                                               'MockSQLTestCaseWithOptimizerBoth.test_query02')
    self.assertIsNotNone(test_suite)
    new_test_suite = tinctest.TINCTestSuite()
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(test_suite.countTestCases(), 2)
    test_result = None
    test_case = None
    # Pick only the ORCA variant out of the planner/ORCA pair.
    for test in test_suite._tests:
        if 'test_query02_orca' in test.name:
            test.__class__.__unittest_skip__ = False
            test_case = test
            new_test_suite.addTest(test)
    self.assertIsNotNone(test_case)
    # Start from a clean output directory so existence checks are meaningful.
    if os.path.exists(test_case.get_out_dir()):
        shutil.rmtree(test_case.get_out_dir())
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_runner = TINCTestRunner(stream=buffer, descriptions=True, verbosity=1)
        test_result = tinc_test_runner.run(new_test_suite)
        self.assertEqual(test_result.testsRun, 1)
        self.assertEqual(len(test_result.errors), 0)
        self.assertEqual(len(test_result.skipped), 0)
        self.assertEqual(len(test_result.failures), 1)
    self.assertTrue(
        os.path.exists(
            os.path.join(test_case.get_out_dir(), 'query02_orca.sql')))
    self.assertTrue(
        os.path.exists(
            os.path.join(test_case.get_out_dir(), 'query02_orca.out')))
    self.assertTrue(
        self._check_str_in_file(
            "SET optimizer=on;",
            os.path.join(test_case.get_out_dir(), 'query02_orca.sql')))
    self.assertTrue(
        self._check_str_in_file(
            "SET optimizer=on;",
            os.path.join(test_case.get_out_dir(), 'query02_orca.out')))
    # Verify that we collect minidump on failure for optimizer execution mode.
    self.assertTrue(
        os.path.exists(
            os.path.join(test_case.get_out_dir(), 'query02_minidump.mdp')))
def test_error(self):
    """An erroring test run at verbosity=2 must report status ERROR."""
    case = MockTINCTestCaseForResults('test_error')
    case.__class__.__unittest_skip__ = False
    with closing(_WritelnDecorator(StringIO())) as stream:
        # Run the tinc test with verbosity=2, mirroring how Pulse invokes it.
        result = tinctest.TINCTextTestResult(stream, descriptions=True, verbosity=2)
        case.run(result)
        # Capture before closing() closes the underlying StringIO.
        output = stream.getvalue()
        match_object = self.p.match(output)
        self.assertEqual(match_object.group(1), 'MockTINCTestCaseForResults.test_error')
        self.assertEqual(match_object.group(5), 'ERROR')
def _discover_and_run_tests(self, start_dirs, patterns, top_level_dir, query_handler):
    """
    Discover tests under start_dirs matching patterns, run them, and
    return the populated TINCTestResultSet.

    Bug fix: the top_level_dir argument was previously ignored (None was
    always passed to discover()); it is now forwarded to the loader.
    """
    tinc_test_loader = tinctest.TINCTestLoader()
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=start_dirs,
        patterns=patterns,
        top_level_dir=top_level_dir,
        query_handler=query_handler)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        return tinc_test_result
def test_skip(self):
    """A skipped test is counted under skipped, with no failures or errors."""
    case = MockTINCTestCaseForResults('test_skip')
    case.__class__.__unittest_skip__ = False
    with closing(_WritelnDecorator(StringIO())) as stream:
        result = tinctest.TINCTextTestResult(stream, True, 1)
        case.run(result)
        # Capture before closing() closes the underlying StringIO.
        output = stream.getvalue()
    self.assertEqual(result.testsRun, 1)
    self.assertEqual(len(result.failures), 0)
    self.assertEqual(len(result.skipped), 1)
    self.assertEqual(len(result.errors), 0)
    self.assertRegexpMatches(output, 'MockTINCTestCaseForResults.test_skip \.\.\. .* \.\.\. skipped .*')
def test_suite_construction_with_discover(self):
    """Discovery over the data_provider directory builds and runs 11 tests."""
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'data_provider')
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['test_*.py'],
        top_level_dir=test_dir)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # This should have run 11 tests.
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 11)
def test_suite_construction_with_discover(self):
    """Discovery over the data_provider directory builds and runs 11 tests."""
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'data_provider')
    tinc_test_suite = tinc_test_loader.discover(start_dirs=[test_dir],
                                                patterns=['test_*.py'],
                                                top_level_dir=test_dir)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # This should have run 11 tests.
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 11)
def test_runner(self):
    """Run the mock concurrency test case through TINCTestRunner."""
    test_loader = tinctest.TINCTestLoader()
    tinc_test_suite = test_loader.loadTestsFromName(
        'tinctest.models.concurrency.test.test_concurrency_test_case.MockConcurrencyTestCase'
    )
    for test in tinc_test_suite._tests:
        # Idiom fix: 'x not in y' instead of 'not x in y'.
        if 'test_skip' not in test.name:
            test.__class__.__unittest_skip__ = False
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_runner = TINCTestRunner(stream=buffer, descriptions=True, verbosity=1)
        tinc_test_runner.run(tinc_test_suite)
def test_loading_with_decorator_discover(self):
    """Tests skipped at load time via decorator are excluded by discovery."""
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'skip_loading')
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['test_load_with_*.py'],
        top_level_dir=test_dir)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # This should have run 2 tests as test_01 would have been skipped
        # loading at the base class with decorator.
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 2)
def test_scenario_with_custom_result_fail(self):
    """A scenario test with a custom result type records 0 successes, 1 failure."""
    loader = tinctest.TINCTestLoader()
    suite = loader.loadTestsFromName('tinctest.models.scenario.test.test_scenario_test_case.MockScenarioTestCaseWithCustomResult.test_with_custom_result_fail')
    for test in suite._tests:
        test.__class__.__unittest_skip__ = False
    with closing(_WritelnDecorator(StringIO())) as stream:
        runner = TINCTestRunner(stream=stream, descriptions=True, verbosity=1)
        runner.run(suite)
    # The custom result type tracks success/failure counts on the test itself.
    for test in suite._tests:
        if 'test_with_custom_result_fail' in test.name:
            self.assertEqual(test.s, 0)
            self.assertEqual(test.f, 1)
def test_suite_construction_with_discover_and_tinc_queries(self):
    """Discovery with a tags query filters out non-matching tests."""
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'data_provider')
    query_handler = TINCDiscoveryQueryHandler("tags=tag1")
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['test_*.py'],
        top_level_dir=test_dir,
        query_handler=query_handler)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # This should have filtered 4 tests and hence run 7 tests.
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 7)
def test_discover_with_invalid_imports(self):
    """A module that fails to import surfaces as a ModuleImportFailure error."""
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'data_provider')
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['discover_invalid_imports.py'],
        top_level_dir=test_dir)
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(len(tinc_test_suite._tests), 1)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # This should have thrown a ModuleImportFailure error.
        self.assertTrue('ModuleImportFailure' in str(tinc_test_result.errors[0][0]))
def test_loading_with_decorator_discover(self):
    """Tests skipped at load time via decorator are excluded by discovery."""
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'skip_loading')
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['test_load_with_*.py'],
        top_level_dir=test_dir)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # This should have run 2 tests as test_01 would have been skipped
        # loading at the base class with decorator.
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 2)
def test_execution_with_dryrun(self):
    """
    Test that the actual tests are not executed in dryrun mode.

    Runs the same suite twice: once with dryrun=True (the mock test
    bodies must not execute) and once normally (they must execute).
    """
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'mocktests')
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['dryrun_test_sample2.py'],
        top_level_dir=test_dir,
        dryrun=True)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 3)
    # This variable should be 0 even after the tests are run in the dryrun
    # mode since we do not expect to run the test code.
    self.assertEqual(tinctest.test.dryrun.tests_run_count, 0)
    # Without dryrun it should have set the variable.
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['dryrun_test_sample2.py'],
        top_level_dir=test_dir)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        self.assertEqual(tinc_test_result.testsRun, 3)
    # Tests should have been run and tests_run_count should be incremented.
    self.assertEqual(tinctest.test.dryrun.tests_run_count, 8)
def run_test(self, method_name, skip_expected, test_case_prefix = 'tinctest.test.test_version_check.MockTINCTestCaseWithGetVersion'):
    """Load the named test in dryrun mode, run it, and check skip bookkeeping.

    method_name      -- method to load from test_case_prefix
    skip_expected    -- whether the run should produce exactly one skip
    test_case_prefix -- dotted path of the mock test case class
    """
    loader = TINCTestLoader()
    full_name = '%s.%s' % (test_case_prefix, method_name)
    suite = loader.loadTestsFromName(full_name, dryrun=True)
    with closing(_WritelnDecorator(StringIO())) as stream:
        result = tinctest.TINCTextTestResult(stream, True, 1)
        suite.run(result)
        text = stream.getvalue()
    self.assertEqual(result.testsRun, 1)
    self.assertEqual(len(result.failures), 0)
    if skip_expected:
        self.assertEqual(len(result.skipped), 1)
    else:
        self.assertEqual(len(result.skipped), 0)
    self.assertEqual(len(result.errors), 0)
def test_sql_test_case_discovery_with_pattern_matching(self):
    """
    Discovery without a query handler loads every SQL test in sql_pattern;
    all 12 are expected to be skipped when run.
    """
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'sql_pattern')
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['sql_pattern.py'],
        top_level_dir=None,
        query_handler=None)
    # Removed dead locals: test_case was never used and test_result was
    # reassigned before first read.
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_runner = TINCTestRunner(stream=buffer, descriptions=True, verbosity=1)
        test_result = tinc_test_runner.run(tinc_test_suite)
        self.assertEqual(test_result.testsRun, 12)
        self.assertEqual(len(test_result.skipped), 12)
def test_suite_construction_with_discover_and_tinc_queries(self):
    """Discovery with a tags query filters out non-matching tests."""
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'data_provider')
    query_handler = TINCDiscoveryQueryHandler("tags=tag1")
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['test_*.py'],
        top_level_dir=test_dir,
        query_handler=query_handler)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # This should have filtered 4 tests and hence run 7 tests.
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 7)
def test_skip_when_tag_in_sql_file(self):
    """A @skip tag in the SQL file causes the loaded test to be skipped at run time."""
    test_loader = TINCTestLoader()
    test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseForSkip)
    test_case = None
    for case in test_suite._tests:
        if case.name == "MockSQLTestCaseForSkip.test_query01":
            test_case = case
    # assertIsNotNone is the idiomatic form of assertNotEqual(x, None).
    self.assertIsNotNone(test_case)
    self.assertEqual(test_case.name, "MockSQLTestCaseForSkip.test_query01")
    with closing(_WritelnDecorator(StringIO())) as buffer:
        test_result = TINCTextTestResult(buffer, True, 1)
        test_case.run(test_result)
        self.assertEqual(test_result.testsRun, 1)
        self.assertEqual(len(test_result.failures), 0)
        self.assertEqual(len(test_result.skipped), 1)
        self.assertEqual(len(test_result.errors), 0)
def test_sanity_failure(self):
    """A failing MPP mock test is recorded as exactly one failure."""
    test_loader = tinctest.TINCTestLoader()
    test_suite = test_loader.loadTestsFromName(
        'mpp.models.regress.mpp_tc.regress_mpp_test_case.MockMPPTestCase.test_failure')
    self.assertIsNotNone(test_suite)
    # Bug fix: assertTrue(len(...), 1) treated 1 as the failure message and
    # passed for any non-empty suite; assertEqual is what was intended.
    self.assertEqual(len(test_suite._tests), 1)
    for test in test_suite._tests:
        test.__class__.__unittest_skip__ = False
    with closing(_WritelnDecorator(StringIO())) as buffer:
        test_result = tinctest.TINCTextTestResult(buffer, True, 1)
        test_suite.run(test_result)
        self.assertEqual(test_result.testsRun, 1)
        self.assertEqual(len(test_result.errors), 0)
        self.assertEqual(len(test_result.skipped), 0)
        self.assertEqual(len(test_result.failures), 1)
def test_run_sql_test_optimizer_minidump_on_failure2(self):
    """
    Test whether we gather minidumps on failures when the test is executed
    with optimizer_mode both.  (Fixed docstring typo 'exeucted'.)
    """
    test_loader = tinctest.TINCTestLoader()
    test_suite = test_loader.loadTestsFromName('mpp.models.regress.sql_related.regress_sql_test_case.' + \
                                               'regress_sql_test_case.' + \
                                               'MockSQLTestCaseWithOptimizerBoth.test_query02')
    self.assertIsNotNone(test_suite)
    new_test_suite = tinctest.TINCTestSuite()
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(test_suite.countTestCases(), 2)
    test_result = None
    test_case = None
    # Pick only the ORCA variant out of the planner/ORCA pair.
    for test in test_suite._tests:
        if 'test_query02_orca' in test.name:
            test.__class__.__unittest_skip__ = False
            test_case = test
            new_test_suite.addTest(test)
    self.assertIsNotNone(test_case)
    # Start from a clean output directory so existence checks are meaningful.
    if os.path.exists(test_case.get_out_dir()):
        shutil.rmtree(test_case.get_out_dir())
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_runner = TINCTestRunner(stream=buffer, descriptions=True, verbosity=1)
        test_result = tinc_test_runner.run(new_test_suite)
        self.assertEqual(test_result.testsRun, 1)
        self.assertEqual(len(test_result.errors), 0)
        self.assertEqual(len(test_result.skipped), 0)
        self.assertEqual(len(test_result.failures), 1)
    self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_orca.sql')))
    self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_orca.out')))
    self.assertTrue(self._check_str_in_file("SET optimizer=on;",
                                            os.path.join(test_case.get_out_dir(), 'query02_orca.sql')))
    self.assertTrue(self._check_str_in_file("SET optimizer=on;",
                                            os.path.join(test_case.get_out_dir(), 'query02_orca.out')))
    # Verify that we collect minidump on failure for optimizer execution mode.
    self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'query02_minidump.mdp')))
def test_filtered_tests_with_dryrun(self):
    """
    Test that tests filtered out with tinc queries are reported correctly
    in dryrun.
    """
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'mocktests')
    query_handler = TINCDiscoveryQueryHandler("tags != hawq")
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['dryrun_test_sample2.py'],
        top_level_dir=test_dir,
        query_handler=query_handler,
        dryrun=True)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 3)
    # This variable should be 0 even after the tests are run in the dryrun
    # mode since we do not expect to run the test code.
    self.assertEqual(tinctest.test.dryrun.tests_run_count, 0)
    # Two tests should be skipped: one because of skip metadata and one
    # because of filtering out.
    self.assertEqual(len(tinc_test_result.skipped), 2)
    metadata_skip_test_tuple = None
    queries_skip_test_tuple = None
    # skipped is a list of (test instance, skip message) tuples.
    for test_tuple in tinc_test_result.skipped:
        if test_tuple[0]._testMethodName == 'test_03':
            metadata_skip_test_tuple = test_tuple
        elif test_tuple[0]._testMethodName == 'test_01':
            queries_skip_test_tuple = test_tuple
    self.assertIsNotNone(metadata_skip_test_tuple)
    self.assertIsNotNone(queries_skip_test_tuple)
    # "satisy" [sic] matches the framework's runtime message verbatim.
    self.assertTrue(
        "Filtering out test as it did not satisy tinc queries check" in
        queries_skip_test_tuple[1])
    self.assertTrue("just skipping" in metadata_skip_test_tuple[1])
def test_sanity_failure(self):
    """A failing MPP mock test is recorded as exactly one failure."""
    test_loader = tinctest.TINCTestLoader()
    test_suite = test_loader.loadTestsFromName(
        'mpp.models.regress.mpp_tc.regress_mpp_test_case.MockMPPTestCase.test_failure'
    )
    self.assertIsNotNone(test_suite)
    # Bug fix: assertTrue(len(...), 1) treated 1 as the failure message and
    # passed for any non-empty suite; assertEqual is what was intended.
    self.assertEqual(len(test_suite._tests), 1)
    for test in test_suite._tests:
        test.__class__.__unittest_skip__ = False
    with closing(_WritelnDecorator(StringIO())) as buffer:
        test_result = tinctest.TINCTextTestResult(buffer, True, 1)
        test_suite.run(test_result)
        self.assertEqual(test_result.testsRun, 1)
        self.assertEqual(len(test_result.errors), 0)
        self.assertEqual(len(test_result.skipped), 0)
        self.assertEqual(len(test_result.failures), 1)
def test_filtered_tests_with_dryrun(self):
    """
    Test that tests filtered out with tinc queries are reported correctly
    in dryrun.
    """
    tinc_test_loader = tinctest.TINCTestLoader()
    pwd = os.path.dirname(inspect.getfile(self.__class__))
    test_dir = os.path.join(pwd, 'mocktests')
    query_handler = TINCDiscoveryQueryHandler("tags != hawq")
    tinc_test_suite = tinc_test_loader.discover(
        start_dirs=[test_dir],
        patterns=['dryrun_test_sample2.py'],
        top_level_dir=test_dir,
        query_handler=query_handler,
        dryrun=True)
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
        tinc_test_suite.run(tinc_test_result)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(tinc_test_result.testsRun, 3)
    # This variable should be 0 even after the tests are run in the dryrun
    # mode since we do not expect to run the test code.
    self.assertEqual(tinctest.test.dryrun.tests_run_count, 0)
    # Two tests should be skipped: one because of skip metadata and one
    # because of filtering out.
    self.assertEqual(len(tinc_test_result.skipped), 2)
    metadata_skip_test_tuple = None
    queries_skip_test_tuple = None
    # skipped is a list of (test instance, skip message) tuples.
    for test_tuple in tinc_test_result.skipped:
        if test_tuple[0]._testMethodName == 'test_03':
            metadata_skip_test_tuple = test_tuple
        elif test_tuple[0]._testMethodName == 'test_01':
            queries_skip_test_tuple = test_tuple
    self.assertIsNotNone(metadata_skip_test_tuple)
    self.assertIsNotNone(queries_skip_test_tuple)
    # "satisy" [sic] matches the framework's runtime message verbatim.
    self.assertTrue("Filtering out test as it did not satisy tinc queries check" in queries_skip_test_tuple[1])
    self.assertTrue("just skipping" in metadata_skip_test_tuple[1])
def test_some_combination(self):
    """
    Combine success/failure/error/skip in one suite and verify both the
    result counters and the formatted output of TINCTestResultSet.
    """
    suite = tinctest.TINCTestSuite()
    suite.addTest(MockTINCTestCaseForResults('test_success'))
    suite.addTest(MockTINCTestCaseForResults('test_failure'))
    suite.addTest(MockTINCTestCaseForResults('test_error'))
    suite.addTest(MockTINCTestCaseForResults('test_skip'))
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_result = TINCTestResultSet(buffer, True, 1)
        suite.run(tinc_test_result)
        # Read the stream before closing() closes the underlying StringIO.
        text = buffer.getvalue()
    self.assertEqual(tinc_test_result.testsRun, 4)
    self.assertEqual(len(tinc_test_result.failures), 1)
    self.assertEqual(len(tinc_test_result.errors), 1)
    self.assertEqual(len(tinc_test_result.skipped), 1)
    # Raw strings: '\.' in a non-raw literal is an invalid escape sequence
    # (SyntaxWarning on modern Python); the regex value is unchanged.
    self.assertRegexpMatches(text, r'MockTINCTestCaseForResults.test_success \.\.\. .* \.\.\. ok')
    self.assertRegexpMatches(text, r'MockTINCTestCaseForResults.test_failure \.\.\. .* \.\.\. FAIL')
    self.assertRegexpMatches(text, r'MockTINCTestCaseForResults.test_error \.\.\. .* \.\.\. ERROR')
    self.assertRegexpMatches(text, r'MockTINCTestCaseForResults.test_skip \.\.\. .* \.\.\. skipped .*')
def run_test(self, method_name, skip_expected, test_case_prefix='tinctest.test.test_version_check.MockTINCTestCaseWithGetVersion'):
    """Load one mock version-check test in dryrun mode and verify its outcome.

    Exactly one test must run, with no failures or errors; the number of
    skips (0 or 1) is dictated by skip_expected.
    """
    loader = TINCTestLoader()
    dotted_name = '%s.%s' % (test_case_prefix, method_name)
    suite = loader.loadTestsFromName(dotted_name, dryrun=True)
    with closing(_WritelnDecorator(StringIO())) as stream:
        result = tinctest.TINCTextTestResult(stream, True, 1)
        suite.run(result)
        text = stream.getvalue()
    self.assertEqual(result.testsRun, 1)
    self.assertEqual(len(result.failures), 0)
    if skip_expected:
        self.assertEqual(len(result.skipped), 1)
    else:
        self.assertEqual(len(result.skipped), 0)
    self.assertEqual(len(result.errors), 0)
def test_run_test_with_data_provider_verify_data(self):
    """A data-provider test should fan out into one run per data type and
    leave one output file per type behind."""
    loader = tinctest.TINCTestLoader()
    suite = loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProviderFailure.test_failure_with_data_provider')
    # Force the mock tests to run despite their skip decoration.
    for case in suite._tests:
        case.__class__.__unittest_skip__ = False
    here = os.path.dirname(inspect.getfile(self.__class__))
    # Clear stale output files so the existence checks below are meaningful.
    for entry in os.listdir(here):
        if fnmatch.fnmatch(entry, '*.out'):
            os.remove(os.path.join(here, entry))
    out_file1 = os.path.join(here, 'type1.out')
    out_file2 = os.path.join(here, 'type2.out')
    with closing(_WritelnDecorator(StringIO())) as stream:
        result = tinctest.TINCTestResultSet(stream, True, 1)
        suite.run(result)
        # The data provider expands the single test into two runs; both fail.
        self.assertEquals(result.testsRun, 2)
        self.assertEquals(len(result.failures), 2)
    self.assertTrue(os.path.exists(out_file1))
    self.assertTrue(os.path.exists(out_file2))
def test_failure_gather_logs(self):
    """A failing MockMPPTestCase test should gather logs into a non-empty
    <method>.logs file in the output directory.

    Fix: the original used ``self.assertTrue(len(test_suite._tests), 1)``,
    which always passes because assertTrue's second argument is the failure
    *message*, not an expected value. Replaced with ``assertEqual`` so the
    suite size is actually verified.
    """
    test_loader = tinctest.TINCTestLoader()
    test_suite = test_loader.loadTestsFromName('mpp.models.regress.mpp_tc.regress_mpp_test_case.MockMPPTestCase.test_gather_logs')
    self.assertIsNotNone(test_suite)
    # BUG FIX: was assertTrue(len(...), 1), which can never fail.
    self.assertEqual(len(test_suite._tests), 1)
    # Force the mock test to run despite its skip decoration.
    for test in test_suite._tests:
        test.__class__.__unittest_skip__ = False
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_runner = tinctest.TINCTestRunner(stream = buffer, descriptions = True, verbosity = 1)
        test_result = tinc_test_runner.run(test_suite)
        self.assertEqual(test_result.testsRun, 1)
        self.assertEqual(len(test_result.errors), 0)
        self.assertEqual(len(test_result.skipped), 0)
        self.assertEqual(len(test_result.failures), 1)
        # 'test' still holds the (single) suite member from the loop above.
        expected_log_file = os.path.join(MockMPPTestCase.get_out_dir(),
                                         test._testMethodName + '.logs')
        self.assertTrue(os.path.exists(expected_log_file))
        self.assertTrue(os.path.getsize(expected_log_file) > 0)
class SQLTestCaseSkipTests(unittest.TestCase):
    """Checks that a skip tag embedded in a sql file is honored by the loader and runner."""

    def test_skip_tag_in_sql_file(self):
        # Constructing the case directly should pick up the skip metadata.
        case = MockSQLTestCaseForSkip('test_query01')
        self.assertEqual(case.name, "MockSQLTestCaseForSkip.test_query01")
        self.assertEqual(case.skip, 'demonstrating skipping')

    def test_skip_when_tag_in_sql_file(self):
        suite = TINCTestLoader().loadTestsFromTestCase(MockSQLTestCaseForSkip)
        target = None
        for candidate in suite._tests:
            if candidate.name == "MockSQLTestCaseForSkip.test_query01":
                target = candidate
        self.assertNotEqual(target, None)
        self.assertEqual(target.name, "MockSQLTestCaseForSkip.test_query01")
        with closing(_WritelnDecorator(StringIO())) as stream:
            result = TINCTextTestResult(stream, True, 1)
            target.run(result)
            self.assertEqual(result.testsRun, 1)
            self.assertEqual(len(result.failures), 0)
            # The sql-file skip tag should cause exactly one skip, no errors.
            self.assertEqual(len(result.skipped), 1)
            self.assertEqual(len(result.errors), 0)
def test_run_test_with_data_provider_verify_data(self):
    """Data-provider expansion: one declared test runs once per data type and
    writes one .out file per type."""
    test_name = 'tinctest.test.test_data_provider.MockTINCTestCaseWithDataProviderFailure.test_failure_with_data_provider'
    suite = tinctest.TINCTestLoader().loadTestsFromName(test_name)
    # Un-skip the mock test class so the bodies actually execute.
    for item in suite._tests:
        item.__class__.__unittest_skip__ = False
    base_dir = os.path.dirname(inspect.getfile(self.__class__))
    # Remove leftover *.out artifacts from earlier runs.
    stale = [name for name in os.listdir(base_dir) if fnmatch.fnmatch(name, '*.out')]
    for name in stale:
        os.remove(os.path.join(base_dir, name))
    expected_outputs = (os.path.join(base_dir, 'type1.out'),
                        os.path.join(base_dir, 'type2.out'))
    with closing(_WritelnDecorator(StringIO())) as stream:
        run_result = tinctest.TINCTestResultSet(stream, True, 1)
        suite.run(run_result)
        # The data provider fans the test out into two runs; both fail.
        self.assertEquals(run_result.testsRun, 2)
        self.assertEquals(len(run_result.failures), 2)
    for path in expected_outputs:
        self.assertTrue(os.path.exists(path))
def test_sanity_run(self):
    # Smoke test: a one-test suite runs to completion without raising.
    with closing(_WritelnDecorator(StringIO())) as stream:
        result = tinctest.TINCTextTestResult(stream, True, 1)
        suite = tinctest.TINCTestSuite()
        suite.addTest(MockTINCTestCase('test_do_stuff'))
        suite.run(result)
def test_custom_result_add_success(self):
    """Run MockTINCTestCaseWithCustomResult and verify that each test's custom
    result object recorded exactly its own outcome (pass/fail/error/skip/
    expected-failure counters plus the result_detail entry)."""
    test_loader = tinctest.TINCTestLoader()
    tinc_test_suite = test_loader.loadTestsFromName(
        'tinctest.test.test_core.MockTINCTestCaseWithCustomResult')
    # Un-skip everything except the test that is meant to stay skipped.
    for test in tinc_test_suite._tests:
        if not 'test_skip' in test.name:
            test.__class__.__unittest_skip__ = False
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_runner = TINCTestRunner(stream=buffer, descriptions=True, verbosity=1)
        tinc_test_runner.run(tinc_test_suite)
    # Walk the suite and check each test's custom per-test result counters.
    count = 0
    for test in tinc_test_suite._tests:
        if 'test_success' in test.name:
            self.assertEqual(test._my_result._my_pass, 1)
            self.assertEqual(test._my_result._my_errors, 0)
            self.assertEqual(test._my_result._my_failure, 0)
            self.assertEqual(test._my_result._my_skip, 0)
            self.assertEqual(test._my_result._my_expected_failure, 0)
            self.assertEqual(test._my_result._my_unexpected_success, 0)
            self.assertEqual(len(test._my_result.result_detail), 1)
            self.assertTrue('success' in test._my_result.result_detail)
            count += 1
        if 'test_failure' in test.name:
            self.assertEqual(test._my_result._my_pass, 0)
            self.assertEqual(test._my_result._my_errors, 0)
            self.assertEqual(test._my_result._my_failure, 1)
            self.assertEqual(test._my_result._my_skip, 0)
            self.assertEqual(test._my_result._my_expected_failure, 0)
            self.assertEqual(test._my_result._my_unexpected_success, 0)
            self.assertEqual(len(test._my_result.result_detail), 1)
            self.assertTrue('failures' in test._my_result.result_detail)
            count += 1
        if 'test_error' in test.name:
            self.assertEqual(test._my_result._my_pass, 0)
            self.assertEqual(test._my_result._my_errors, 1)
            self.assertEqual(test._my_result._my_failure, 0)
            self.assertEqual(test._my_result._my_skip, 0)
            self.assertEqual(test._my_result._my_expected_failure, 0)
            self.assertEqual(test._my_result._my_unexpected_success, 0)
            self.assertEqual(len(test._my_result.result_detail), 1)
            self.assertTrue('errors' in test._my_result.result_detail)
            count += 1
        if 'test_expectedfailure' in test.name:
            self.assertEqual(test._my_result._my_pass, 0)
            self.assertEqual(test._my_result._my_errors, 0)
            self.assertEqual(test._my_result._my_failure, 0)
            self.assertEqual(test._my_result._my_skip, 0)
            self.assertEqual(test._my_result._my_expected_failure, 1)
            self.assertEqual(test._my_result._my_unexpected_success, 0)
            self.assertEqual(len(test._my_result.result_detail), 1)
            self.assertTrue(
                'expected_failure' in test._my_result.result_detail)
            count += 1
        # NOTE(review): this 'test_error' branch duplicates the one above, so
        # the error test bumps count twice — presumably a copy-paste that was
        # meant for a different outcome (e.g. unexpected success); confirm
        # against the count == 6 expectation below before changing.
        if 'test_error' in test.name:
            self.assertEqual(test._my_result._my_pass, 0)
            self.assertEqual(test._my_result._my_errors, 1)
            self.assertEqual(test._my_result._my_failure, 0)
            self.assertEqual(test._my_result._my_skip, 0)
            self.assertEqual(test._my_result._my_expected_failure, 0)
            self.assertEqual(test._my_result._my_unexpected_success, 0)
            self.assertEqual(len(test._my_result.result_detail), 1)
            self.assertTrue('errors' in test._my_result.result_detail)
            count += 1
        if 'test_skip' in test.name:
            self.assertEqual(test._my_result._my_pass, 0)
            self.assertEqual(test._my_result._my_errors, 0)
            self.assertEqual(test._my_result._my_failure, 0)
            self.assertEqual(test._my_result._my_skip, 1)
            self.assertEqual(test._my_result._my_expected_failure, 0)
            self.assertEqual(test._my_result._my_unexpected_success, 0)
            count += 1
            self.assertEqual(len(test._my_result.result_detail), 1)
            self.assertTrue('skip' in test._my_result.result_detail)
    self.assertEqual(count, 6)
def __init__(self, stream, descriptions, verbosity, immediate_stream):
    """Initialize the base text result, plus xunit XML reporting and a
    second stream used for immediate (unbuffered) output."""
    super(EnhancedTextTestResult, self).__init__(stream, descriptions, verbosity)
    self.immediate_stream = _WritelnDecorator(immediate_stream)
    self.last_test = None
    self.xunit = Xunit('TestResults.xml')
    # Route xunit's traceback formatting through this result's formatter.
    self.xunit._exc_info_to_string = self._exc_info_to_string
def test_custom_result_add_success(self):
    """Run MockTINCTestCaseWithCustomResult and verify that each test's custom
    result object recorded exactly its own outcome."""
    test_loader = tinctest.TINCTestLoader()
    tinc_test_suite = test_loader.loadTestsFromName('tinctest.test.test_core.MockTINCTestCaseWithCustomResult')
    # Un-skip everything except the test that is meant to stay skipped.
    for test in tinc_test_suite._tests:
        if not 'test_skip' in test.name:
            test.__class__.__unittest_skip__ = False
    with closing(_WritelnDecorator(StringIO())) as buffer:
        tinc_test_runner = TINCTestRunner(stream = buffer, descriptions = True, verbosity = 1)
        tinc_test_runner.run(tinc_test_suite)
    # Walk the suite and check each test's custom per-test result counters.
    count = 0
    for test in tinc_test_suite._tests:
        if 'test_success' in test.name:
            self.assertEqual(test._my_result._my_pass, 1)
            self.assertEqual(test._my_result._my_errors, 0)
            self.assertEqual(test._my_result._my_failure, 0)
            self.assertEqual(test._my_result._my_skip, 0)
            self.assertEqual(test._my_result._my_expected_failure, 0)
            self.assertEqual(test._my_result._my_unexpected_success, 0)
            self.assertEqual(len(test._my_result.result_detail), 1)
            self.assertTrue('success' in test._my_result.result_detail)
            count += 1
        if 'test_failure' in test.name:
            self.assertEqual(test._my_result._my_pass, 0)
            self.assertEqual(test._my_result._my_errors, 0)
            self.assertEqual(test._my_result._my_failure, 1)
            self.assertEqual(test._my_result._my_skip, 0)
            self.assertEqual(test._my_result._my_expected_failure, 0)
            self.assertEqual(test._my_result._my_unexpected_success, 0)
            self.assertEqual(len(test._my_result.result_detail), 1)
            self.assertTrue('failures' in test._my_result.result_detail)
            count += 1
        if 'test_error' in test.name:
            self.assertEqual(test._my_result._my_pass, 0)
            self.assertEqual(test._my_result._my_errors, 1)
            self.assertEqual(test._my_result._my_failure, 0)
            self.assertEqual(test._my_result._my_skip, 0)
            self.assertEqual(test._my_result._my_expected_failure, 0)
            self.assertEqual(test._my_result._my_unexpected_success, 0)
            self.assertEqual(len(test._my_result.result_detail), 1)
            self.assertTrue('errors' in test._my_result.result_detail)
            count += 1
        if 'test_expectedfailure' in test.name:
            self.assertEqual(test._my_result._my_pass, 0)
            self.assertEqual(test._my_result._my_errors, 0)
            self.assertEqual(test._my_result._my_failure, 0)
            self.assertEqual(test._my_result._my_skip, 0)
            self.assertEqual(test._my_result._my_expected_failure, 1)
            self.assertEqual(test._my_result._my_unexpected_success, 0)
            self.assertEqual(len(test._my_result.result_detail), 1)
            self.assertTrue('expected_failure' in test._my_result.result_detail)
            count += 1
        # NOTE(review): this 'test_error' branch duplicates the one above, so
        # the error test bumps count twice — presumably a copy-paste that was
        # meant for a different outcome (e.g. unexpected success); confirm
        # against the count == 6 expectation below before changing.
        if 'test_error' in test.name:
            self.assertEqual(test._my_result._my_pass, 0)
            self.assertEqual(test._my_result._my_errors, 1)
            self.assertEqual(test._my_result._my_failure, 0)
            self.assertEqual(test._my_result._my_skip, 0)
            self.assertEqual(test._my_result._my_expected_failure, 0)
            self.assertEqual(test._my_result._my_unexpected_success, 0)
            self.assertEqual(len(test._my_result.result_detail), 1)
            self.assertTrue('errors' in test._my_result.result_detail)
            count += 1
        if 'test_skip' in test.name:
            self.assertEqual(test._my_result._my_pass, 0)
            self.assertEqual(test._my_result._my_errors, 0)
            self.assertEqual(test._my_result._my_failure, 0)
            self.assertEqual(test._my_result._my_skip, 1)
            self.assertEqual(test._my_result._my_expected_failure, 0)
            self.assertEqual(test._my_result._my_unexpected_success, 0)
            count += 1
            self.assertEqual(len(test._my_result.result_detail), 1)
            self.assertTrue('skip' in test._my_result.result_detail)
    self.assertEqual(count, 6)