def __test_copy_log_speced_per_id(self, ztestsuite, localhost_log_file, fetch_logs_flag=True):
  """
  Base test method containing common code called by public test methods
  for testing execution of copy of logs based on function signatures.

  NOTE(review): this method is defined twice in this file; Python keeps
  only the later definition, so the duplicates should be consolidated.

  :param ztestsuite: test suite object handed to TestRunner
  :param localhost_log_file: path of the log file created on "remote"
      (which is actually localhost)
  :param fetch_logs_flag: value for the "should_fetch_logs" config
      override; when True the runner is expected to copy the log file
      into its logs dir
  """
  import tempfile
  runtime.reset_collector()
  # create the log file on "remote" which is actually localhost.
  # The file is opened in binary mode, so write bytes: passing a str
  # here raises TypeError on Python 3 (and b"..." is equivalent on 2).
  with open(localhost_log_file, 'wb') as f:
    f.write(b"This is a log")
  runner = TestRunner(
      ztestsuite=ztestsuite,
      config_overrides={"should_fetch_logs": fetch_logs_flag})
  logs_dir = tempfile.mkdtemp()
  runner.set_logs_dir(logs_dir)
  try:
    runner.run()
    # no logs specified on default, so should not have any files
    # unless fetching was enabled
    if fetch_logs_flag:
      self.assertEqual(
          os.listdir(logs_dir),
          ['ztestsuite.unittest-' + os.path.basename(localhost_log_file)])
    else:
      self.assertEqual(os.listdir(logs_dir), [])
  finally:
    # cleanup the temporary logs dir even when run() or an assertion fails
    # (the original `except: raise` clause was a no-op and was dropped)
    shutil.rmtree(logs_dir)
def __test_copy_log_speced_per_id(self, ztestsuite, localhost_log_file, fetch_logs_flag=True):
  """
  Base test method containing common code called by public test methods
  for testing execution of copy of logs based on function signatures.

  :param ztestsuite: test suite object handed to TestRunner
  :param localhost_log_file: path of the log file created on "remote"
      (which is actually localhost)
  :param fetch_logs_flag: value for the "should_fetch_logs" config
      override; when True the runner is expected to copy the log file
      into its logs dir
  """
  import tempfile
  runtime.reset_collector()
  # create the log file on "remote" which is actually localhost.
  # Binary mode requires bytes: writing a str raises TypeError on
  # Python 3, and b"..." behaves identically on Python 2.
  with open(localhost_log_file, 'wb') as f:
    f.write(b"This is a log")
  runner = TestRunner(
      ztestsuite=ztestsuite,
      config_overrides={"should_fetch_logs": fetch_logs_flag})
  logs_dir = tempfile.mkdtemp()
  runner.set_logs_dir(logs_dir)
  try:
    runner.run()
    # no logs specified on default, so should not have any files
    # unless fetching was enabled
    if fetch_logs_flag:
      self.assertEqual(os.listdir(logs_dir), [
          'ztestsuite.unittest-' + os.path.basename(localhost_log_file)
      ])
    else:
      self.assertEqual(os.listdir(logs_dir), [])
  finally:
    # cleanup the temporary logs dir even on failure; the redundant
    # bare `except: raise` that preceded this was removed
    shutil.rmtree(logs_dir)
def test_full_run_ztestsuite(self):
  """
  Tests the new use of ztest and zetestsuite
  :return:
  """
  runtime.reset_collector()
  suite = SampleTestSuite()
  suite.zopkio()
def test_full_run_parallel(self):
  """
  Tests a full run with parallel tests
  """
  runtime.reset_collector()
  sample_path = os.path.join(
      self.FILE_LOCATION,
      "samples/sample_test_with_naarad_run_tests_in_parallel.py")
  runner = TestRunner(sample_path, ["test0", "test1", "test2"], {},
                      reporter_type='junit_reporter')
  runner.run()
def test_full_run(self):
  """
  Tests a full run
  """
  runtime.reset_collector()
  sample_path = os.path.join(self.FILE_LOCATION,
                             "samples/sample_test_with_naarad.py")
  TestRunner(sample_path, ["test0", "test1", "test2"], {}).run()
def test_full_run_with_skip(self):
  """
  Tests failing setup for one test and having it skip
  """
  runtime.reset_collector()
  sample_path = os.path.join(self.FILE_LOCATION,
                             "samples/sample_test_fail_first_setup.py")
  # -1 disables the per-suite failure cap so the remaining tests still run
  overrides = {"max_failures_per_suite_before_abort": -1}
  runner = TestRunner(sample_path, None, overrides,
                      reporter_type='junit_reporter')
  runner.run()
def test_full_run_with_skip_and_stop_all_config(self):
  """
  Tests failing consecutive setup_suites and skipping the rest of the
  configurations.
  """
  runtime.reset_collector()
  test_file = os.path.join(self.FILE_LOCATION,
                           "samples/sample_test_fail_setup_suite.py")
  # 0 means abort all remaining configurations on the first suite failure
  test_runner = TestRunner(test_file, None,
                           {"max_suite_failures_before_abort": 0},
                           reporter_type='junit_reporter')
  test_runner.run()
def test_full_run_with_skip_and_stop_one_config(self):
  """
  Tests failing consecutive setups and having the entire execution of a
  configuration stop.
  """
  runtime.reset_collector()
  sample_path = os.path.join(self.FILE_LOCATION,
                             "samples/sample_test_fail_all_setup.py")
  # 0 aborts the current configuration on the first failed setup
  overrides = {"max_failures_per_suite_before_abort": 0}
  TestRunner(sample_path, None, overrides,
             reporter_type='junit_reporter').run()