def LoadTestsFromModule(self, module, suite_name=None):
    """Collect every TestCase attribute of *module* into a suite and load it."""
    label = suite_name if suite_name else ''  # module.__name__
    suite = TestSuite(label, module.__doc__)
    for attr in (getattr(module, name) for name in dir(module)):
        if isinstance(attr, TestCase):
            suite.AddTestCase(attr)
    self.LoadTestSuite(suite)
def run(self):
    """Write one feature file per TFS test suite and list the generated
    filenames, one per line, in <features_path>/sequence.featureset."""
    testsuites = self.tfs.get_testsuites()
    self.__create_tfs_features_path(self.features_path)
    # The 'with' statement closes the file on exit; the explicit f.close()
    # the original carried after the block was redundant and has been removed.
    with open(self.features_path + '/sequence.featureset', 'w') as f:
        for testsuite in testsuites:
            ts = TestSuite(self.tfs, testsuite)
            new_filename = ts.write_feature_file(self.features_path)
            f.write(new_filename + '\n')
def testSuite(self):
    """Running a two-test suite records one pass and one failure."""
    tests = TestSuite()
    for method_name in ("testMethod", "testBrokenMethod"):
        tests.add(WasRun(method_name))
    tests.run(self.result)
    assert self.result.summary() == "2 run, 1 failed"
    print('testSuite done')
def run_tests():
    """Exercise reverse_list and city_pop_dict with labelled assertions.

    Fix: the original set test_name once but passed it only to the first
    assertion, leaving every later failure unlabelled; each call now carries
    its own name (backward compatible — the name is an optional message arg).
    """
    # reverse_list: letters
    test_name = 'reverse_list 01'
    test_list = ['a', 'b', 'c', 'd']
    test.assert_equals(reverse_list(test_list), ['d', 'c', 'b', 'a'], test_name)

    # reverse_list: two-letter strings
    test_name = 'reverse_list 02'
    test_list = ['aa', 'bb', 'cc', 'dd']
    test.assert_equals(reverse_list(test_list), ['dd', 'cc', 'bb', 'aa'], test_name)

    # reverse_list: digits
    test_name = 'reverse_list 03'
    test_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    test.assert_equals(reverse_list(test_list), [0, 9, 8, 7, 6, 5, 4, 3, 2, 1], test_name)

    # city_pop_dict: nests city -> population under the country key
    test_name = 'city_pop_dict 01'
    country = "Russia"
    cities = ["Moscow", "Saint Petersburg", "Novosibirsk"]
    populations = [12432531, 5383890, 1618039]
    expected = {
        "Russia": {
            "Moscow": 12432531,
            "Saint Petersburg": 5383890,
            "Novosibirsk": 1618039,
        }
    }
    test.assert_equals(city_pop_dict(country, cities, populations), expected, test_name)
def generate_test_cases(self, context_factory, parameterizations=None):
    """Creates the set of test cases that this test represents.

    The parameterizations argument should contain a parameterizations registry
    (keyed by parameterization name) containing values that are instances of a
    checkers.Parameterization class.

    Args:
      context_factory: Callable to create a context instance given a TestCase.
      parameterizations: (Registry) Parameterizations used to create test cases.

    Returns:
      list(TestCase): List of test cases (aka test closures).
    """
    test_cases = registry.AutoKeyRegistry(lambda tc: tc.full_name)
    if not parameterizations:
        test_case = TestCase(self, context_factory, description=self.description)
        test_cases.register(test_case)
        return test_cases
    # It is a parameterized test, so we need to generate multiple test cases;
    # one for each parameterization.
    # FIX: dict.iteritems() is Python 2 only (AttributeError on Python 3);
    # items() behaves identically for this iteration on both versions.
    for suffix, param in parameterizations.items():
        name = '%s_%s' % (self.name, suffix)
        full_name = '%s_%s' % (self.full_name, suffix)
        test_case = TestCase(self, context_factory, name=name,
                             full_name=full_name, description=self.description)
        for key, value in param.variables.items():
            test_case.context.variables.register(key, value)
        for suite_name in param.suites:
            test_case.test_suites.register(TestSuite(suite_name))
        test_cases.register(test_case)
    return test_cases
def create_system_test_suite():
    """Build the system-test suite.

    The list order is significant: it encodes ascending dependencies
    between the test modules.
    """
    ordered_modules = [
        system_test_directory,
        system_test_file,
        system_test_class_header_template,
        system_test_class_source_template,
        system_test_class_tests_template,
        system_test_license_header_template,
        system_test_top_level_cmakelists,
        system_test_project,
        system_test_implementation_creator,
        system_test_interface_creator,
        system_test_class_creator,
        system_test_command_create_project,
        system_test_command_create_class,
        system_test_command_create_implementation,
        system_test_command_create_interface,
    ]
    SystemTest.reset("testing")
    return TestSuite.create_from_modules(ordered_modules)
def testSuite(self):
    """A suite of one passing and one broken test yields '2 run, 1 failed'."""
    both = TestSuite()
    both.add(WasRun("testMethod"))
    both.add(WasRun("testBrokenMethod"))
    both.run(self.result)
    assert self.result.summary() == "2 run, 1 failed"
class TestCaseTest(TestCase):
    """Self-tests for the xUnit framework, written with the framework itself."""

    def setUp(self):
        # Fresh fixtures for each test: a collector, a known test, an empty suite.
        self.result = TestResult()
        self.test = WasRun("testMethod")
        self.suite = TestSuite()

    def testRunning(self):
        self.test.run(self.result)
        assert self.test.wasRun

    def testSetUp(self):
        self.test.run(self.result)
        assert self.test.wasSetUp

    def testTemplateMethod(self):
        self.test.run(self.result)
        assert self.test.log == "setUp testMethod tearDown"

    def testResult(self):
        self.test.run(self.result)
        assert self.result.summary() == "1 run, 0 failed"

    def testFailedResult(self):
        # Deliberately runs the broken method via a local fixture.
        failing = WasRun("testBrokenMethod")
        failing.run(self.result)
        assert self.result.summary() == "1 run, 1 failed"

    def testSuite(self):
        self.suite.add(WasRun("testMethod"))
        self.suite.add(WasRun("testBrokenMethod"))
        self.suite.run(self.result)
        assert self.result.summary() == "2 run, 1 failed"
def main(): """ """ # data = credit.dataset_31_credit_g() data = wine.wine_quality_red_csv() print(data.shape) print(data.columns) target = "class" X, y = data[[col for col in data.columns if col != target]], data[target] X_train, X_test, y_train, y_test = split(X, y, test_size=0.2, random_state=0) # pipeline = CreditGPipeline() pipeline = WineQualityPipeline() classifier = RandomForest(size=40) model = pipeline.with_estimator(classifier).fit(X_train, y_train) prediction = model.predict(X_test) print(accuracy_score(y_test, prediction)) suite = TestSuite() automated_suite = AutomatedTestSuite() data_profile = DataFrameProfiler().on(X_train) pipeline_profile = SklearnPipelineProfiler().on(model) suite.add(Test().is_complete( data_profile.for_column('volatile_acidity')).is_in_range( data_profile.for_column('alcohol'))) warnings = suite.on(X_test) print("*** TEST_SUITE, X_TEST") if warnings and (len(warnings) != 0): print("======= WARNINGS =======") for warn in warnings: print(warn) error_generator = ExplicitMissingValues() corrupted_X_test = error_generator.run(X_test, ['volatile_acidity']) warnings = suite.on(corrupted_X_test) print("*** TEST_SUITE, CORRUPTED_X_TEST") if warnings and (len(warnings) != 0): print("======= WARNINGS =======") for warn in warnings: print(warn) tests, warnings = (automated_suite.with_profiles( data_profile, pipeline_profile).run(corrupted_X_test)) print("*** AUTOMATED_TEST_SUITE, CORRUPTED_X_TEST") if warnings and (len(warnings) != 0): print("======= WARNINGS =======") for warn in warnings: print(warn)
def main():
    # Entry point: parse CLI args, wire sample source -> test suite -> UI,
    # run, save results, and exit. Python 2 only ('except Exception, e' and
    # 'print >>' syntax).
    try:
        # set up and parse arguments
        parser = argparse.ArgumentParser(description='Mozilla Powertool')
        parser.add_argument('-d', '--device', type=str, nargs='+', choices=['yocto','mozilla'], required=True, help="specify ammeter device to use")
        parser.add_argument('-p', '--path', type=str, default=None, help="specify path to ammeter device (e.g. /dev/ttyACM0)")
        parser.add_argument('-u', '--ui', type=str, default='tk', choices=['tk','cli', 'web', 'simple'], help="specify which UI to use")
        parser.add_argument('-f', '--file', type=str, default=None, help="test run config file")
        parser.add_argument('-o', '--out', type=str, default=None, help="output data file")
        parser.add_argument('-b', '--begin_experiments', action="store_true", help="begin experiments as soon as the UI has been displayed")
        parser.add_argument('-s', '--show', type=str, default='current', help="name of the sample source to display")
        args = parser.parse_args()

        # create the sample source
        source = SampleSource.create( args.device, args.path )

        # create the test suite
        suite = TestSuite.create( args.file )

        # add the sample source
        suite.addSource( source )

        # create the saver
        saver = TestSuiteSaver.create( args.out )

        # create the displayer
        ui = UI.create( args.ui, suite, args.begin_experiments, args.show )

        # run the app
        ui.run()

        # save the data
        saver.save( suite )

        # shut down the sample source
        source.close()

        sys.exit(0)

    except Exception, e:
        # Report the innermost traceback frame (file, line) along with the error.
        # NOTE(review): if ArgumentParser construction itself raised, 'parser'
        # is unbound here and print_help() would raise a second error.
        frame = inspect.trace()[-1]
        print >> sys.stderr, "\nException:\n from %s, line %d:\n %s\n" % (frame[1], frame[2], e)
        parser.print_help()
        sys.exit(1)
def main(): try: # set up and parse arguments parser = argparse.ArgumentParser(description='Mozilla Powertool') parser.add_argument('-d', '--device', type=str, default=['mozilla','yocto'], choices=['yocto','mozilla'], action='append', help="specify ammeter device to use") parser.add_argument('-p', '--path', type=str, default=None, help="specify path to ammeter device (e.g. /dev/ttyACM0)") parser.add_argument('-u', '--ui', type=str, required=True, choices=['tk','cli'], default='cli', help="specify which UI to use") parser.add_argument('-f', '--file', type=str, default=None, help="test run config file") parser.add_argument('-o', '--out', type=str, default=None, help="output data file") parser.add_argument('-s', '--show', type=str, default=None, help="name of the sample source to display") args = parser.parse_args() # create the sample source source = SampleSource.create( args.device, args.path ) # create the test suite suite = TestSuite.create( args.file ) # add the sample source suite.addSource( source ) # create the saver saver = TestSuiteSaver.create( args.out ) # create the displayer ui = UI.create( args.ui, suite, args.show ) # run the app ui.run() # save the data saver.save( suite ) # shut down the sample source source.close() sys.exit(0) except Exception, e: print("\nERROR: %s\n" % e) parser.print_help() sys.exit(1)
def create_unit_test_suite():
    """Build the unit-test suite.

    The list order is significant: it encodes ascending dependencies
    between the test modules.
    """
    ordered = [
        test_file_template,
        test_class_header_template,
        test_class_source_template,
        test_class_tests_template,
        test_license_header,
        test_license_header_template,
        test_project_paths,
        test_top_level_cmakelists,
        test_class_header,
        test_class_source,
        test_class_tests,
        test_interface_header,
        test_implementation_header,
        test_implementation_source,
        test_implementation_tests,
        test_command_create_project,
        test_command_parser,
    ]
    return TestSuite.create_from_modules(ordered)
#!/usr/bin/env python
import os, sys

# Make the project's ../lib and ../lib/test directories importable,
# resolved relative to this script's own location.
_base = os.path.abspath(os.path.dirname(__file__))
for _rel in ('../lib', '../lib/test'):
    sys.path.append(os.path.abspath(os.path.join(_base, _rel)))

import logging as log
log.basicConfig(format='%(message)s', level=log.DEBUG)

from test_suite import TestSuite

suite = TestSuite()
suite.run()
suite.save()
def setUp(self):
    # Fresh fixtures per test: a result collector, a known-good test case,
    # and an empty suite for the suite-level tests to populate.
    self.result = TestResult()
    self.test = WasRun("testMethod")
    self.suite = TestSuite()
def CreateTestSuite(self, suite_name=None, suite_description=''):
    """Register a new TestSuite under *suite_name* unless one already exists.

    Falls back to TestRun._DEFAULT_SUITE_NAME when no name is given.
    """
    key = suite_name if suite_name else TestRun._DEFAULT_SUITE_NAME
    if key in self.suites:
        return  # keep the existing suite untouched
    self.suites[key] = TestSuite(key, suite_description)
def main():
    """Launch the Mininet network, start the NIDS, run one attack test, and
    append the outcome to the results file. Always stops Neptune and the
    network and resets the training-status file on the way out."""
    dir = os.getcwd() + "/App/"
    net_assigned = False
    # BUG FIX: computed before any sys.argv access. The original assigned
    # training_dir inside the try AFTER reading sys.argv[1..3], so a missing
    # CLI argument raised IndexError first and the finally-block then died
    # with NameError on training_dir.
    training_dir = dir + "nids_config/training_status.txt"
    try:
        # Read testing arguments
        target_classifier = sys.argv[1]
        network_attack = sys.argv[2]
        adversarial_attack = sys.argv[3]

        # Configure and launch Mininet
        setLogLevel('info')
        net = launch_network()
        net_assigned = True

        # Launch NIDS after clearing the training-status file.
        try:
            with open(training_dir, 'w') as training_status:
                training_status.write("")
        except (IOError, OSError):  # was bare 'except:'; narrowed so Ctrl-C propagates
            logging.error('Unable to open training status directory')
        launch_nids(dir, target_classifier)
        print("\n")

        # Initialise test suite and execute attack
        ts = TestSuite(dir, network_attack, adversarial_attack, net)
        result = ts.run_test()
        time.sleep(5)

        results_dir = dir + "TestManager/test_results/results.txt"
        try:
            with open(results_dir, "a+") as results:
                # Write results based on what the test has returned
                output = result
                results.write(str(output))
                results.write("\n")
        except (IOError, OSError):  # was bare 'except:'
            logging.error('Unable to open and write to results file')
    except KeyboardInterrupt:
        print("Stopping Neptune..")
        os.system('sudo pkill -f Neptune/main.py')
        print("\nStopping network..\n")
        if net_assigned:
            net.stop()
        sys.exit(0)
    finally:
        print("Stopping Neptune..")
        os.system('sudo pkill -f Neptune/main.py')
        if net_assigned:
            print("\nStopping network..\n")
            net.stop()
        time.sleep(5)
        # Best-effort reset; training_dir is always bound now (see above).
        with open(training_dir, 'w') as training_status:
            training_status.write("")
# NOTE(review): these defs take self and are registered with TestCaseTest
# below, so they are presumably TestCaseTest methods whose 'class' header
# lies outside this chunk -- confirm before reformatting their indentation.
def testTemplateMethod(self):
    self.test.run(self.result)
    assert ("setUp testMethod tearDown" == self.test.log)

def testResult(self):
    self.test.run(self.result)
    assert ("1 run, 0 failed" == self.result.summary())

def testFailedResult(self):
    # Uses a local WasRun rather than the setUp fixture so the broken
    # method is exercised deliberately.
    test = WasRun("testBrokenMethod")
    test.run(self.result)
    assert ("1 run, 1 failed" == self.result.summary())

def testSuite(self):
    self.suite.add(WasRun("testMethod"))
    self.suite.add(WasRun("testBrokenMethod"))
    self.suite.run(self.result)
    assert ("2 run, 1 failed" == self.result.summary())

# Module-level driver: run the framework's self-tests through the framework.
suite = TestSuite()
suite.add(TestCaseTest("testTemplateMethod"))
suite.add(TestCaseTest("testResult"))
# NOTE(review): no testFailedResultFormatting method is visible in this
# chunk -- verify it exists elsewhere in the class.
suite.add(TestCaseTest("testFailedResultFormatting"))
suite.add(TestCaseTest("testFailedResult"))
suite.add(TestCaseTest("testSuite"))
result = TestResult()
suite.run(result)
print(result.summary())
mcap_iface = "org.bluez.mcap" # bdaddr = "00:80:98:E7:36:9f" bdaddr = "00:80:98:E7:36:85" cpsm = 0x1001 dpsm = 0x1003 mdepid = 1 conf = 0x00 hci_interface = None print "Bdaddr", bdaddr session = None mcl = None csp_caps = None last_mdl = None mcap_tests = TestSuite(mcap_suites) def object_signal(*args, **kwargs): global mcl global last_mdl if 'member' not in kwargs: return sig_name = kwargs['member'] if sig_name == "Recv": mdl, data = args print "Received data in mdl", mdl print "Data:", data elif sig_name == "MCLConnected": tmp_mcl, _ = args if mcl != None: print "Mcl is already connected"
#!/usr/bin/env python
import os, sys

# Extend the import path with the project's lib/ and lib/test/ directories,
# located relative to this script.
basedir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.abspath(os.path.join(basedir, '../lib')))
sys.path.append(os.path.abspath(os.path.join(basedir, '../lib/test')))

import logging as log
log.basicConfig(format='%(message)s', level=log.DEBUG)

from test_suite import TestSuite

runner = TestSuite()
runner.run()
runner.save()
from test_suite import TestSuite

# Configure one test battery and run it.
# NOTE(review): the meaning of the eight positional parameters is not
# visible here -- see TestSuite.set_test_params for the signature.
ts = TestSuite()
#ts.set_test_params(0, 'prince', 0, 2009, 1, 5, 10, 0)
ts.set_test_params(1, 'prince', 0, 2009, 3, 10, 10, 2)
ts.battery()
#ts.battery(0, 'prince', 2009, 20, 20, 10, 0)
def testSuiteContainsFailingSetup(self):
    """A test whose setUp raises is still counted as run and failed."""
    failing_suite = TestSuite()
    failing_suite.add(BrokenSetup("testMethod"))
    failing_suite.run(self.result)
    assert self.result.summary() == "1 run, 1 failed"
# Parse PaySim.properties (skipping the header line) into a dict.
with open(os.path.join(dir_path, "PaySim.properties")) as file:
    content = file.readlines()
properties = dict()
for line in content[1:]:
    # BUG FIX: split on the FIRST '=' only and strip whitespace. The
    # original plain split('=') truncated any value containing '=', and
    # every stored value kept its trailing newline. partition() also
    # tolerates lines without '=' (value becomes '').
    key, _, value = line.partition("=")
    properties[key.strip()] = value.strip()
#os.system("java -jar {jar_path} -file PaySim.properties 1 off".format(jar_path=file_path))

# Pick the most recent simulation output and load its raw transaction log.
simulation_name = os.listdir(os.path.join(dir_path, "outputs"))[-1]
rawLog = pd.read_csv(
    os.path.join(dir_path, "outputs", simulation_name,
                 "{}_rawLog.csv".format(simulation_name)))

# Run every registered test over the log and echo its console summary.
test_suite = TestSuite(rawLog, properties)
for test in test_suite.tests:
    console_string = test(test_suite)
    nice_printing(console_string)

# Render the HTML report and a PDF copy from the suite's template variables.
template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")
css_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "css")
env = Environment(loader=FileSystemLoader(template_path))
template = env.get_template("report.html")
template.stream(test_suite.template_vars).dump("output.html")
html_output = template.render(test_suite.template_vars)
weasyprint.HTML(string=html_output).write_pdf(
    "report.pdf", stylesheets=[os.path.join(css_path, "typography.css")])
from test_suite import TestSuite

# Configure one test battery and run it.
# NOTE(review): the meaning of the eight positional parameters is not
# visible here -- see TestSuite.set_test_params for the signature.
ts = TestSuite()
ts.set_test_params(1, 'prince', 1, 2009, 10, 10, 20, 0)
ts.battery()
#ts.battery(1, 'prince_c', 2009, 20, 20, 10, 0)
# NOTE(review): this chunk opens mid-method -- the four statements below use
# self.result, so their enclosing 'def' line lies outside the visible source.
suite = TestSuite()
suite.add(BrokenSetup("testMethod"))
suite.run(self.result)
assert ("1 run, 1 failed" == self.result.summary())

# NOTE(review): these defs take self and are registered with TestCaseTest
# below; presumably TestCaseTest methods whose class header is not visible.
def testSuite(self):
    suite = TestSuite()
    suite.add(WasRun("testMethod"))
    suite.add(WasRun("testBrokenMethod"))
    suite.run(self.result)
    assert ("2 run, 1 failed" == self.result.summary())

def tearDownIfFailed(self):
    # Runs a failing test; carries no assertion of its own in this chunk.
    test = WasRun("testBrokenMethod")
    test.run(self.result)

# Module-level driver: run the framework's self-tests through the framework.
suite = TestSuite()
suite.add(TestCaseTest("testTemplateMethod"))
suite.add(TestCaseTest("testResult"))
suite.add(TestCaseTest("testFailedResultFormatting"))
suite.add(TestCaseTest("testFailedResult"))
suite.add(TestCaseTest("testSuite"))
suite.add(TestCaseTest("setupFailed"))
suite.add(TestCaseTest("testSuiteContainsFailingSetup"))
suite.add(TestCaseTest("tearDownIfFailed"))
result = TestResult()
suite.run(result)
print(result.summary())
def testFailedResult(self): test = WasRun("testBrokenMethod") test.run(self.result) assert("1 run, 1 failed" == self.result.summary() ) def testFailedResultFormatting(self): result.testStarted() result.testFailed() assert("1 run, 1 failed" == self.result.summary() ) def testSuite(self): suite = TestSuite() suite.add(WasRun("testMethod")) suite.add(WasRun("testBrokenMethod")) suite.run(self.result) assert("2 run, 1 failed" == self.result.summary() ) suite = TestSuite() suite.add(TestCaseTest("testTemplateMethod")) suite.add(TestCaseTest("testResult")) suite.add(TestCaseTest("testFailedResult")) suite.add(TestCaseTest("testFailedResultFormatting")) suite.add(TestCaseTest("testSuite")) result = TestResult() suite.run(result) print result.summary()
def discover(self, path):
    """Build a TestSuite containing one TestCase per API file found under *path*."""
    suite = TestSuite()
    cases = (TestCase(api_file) for api_file in self.collect(path))
    for case in cases:
        suite.add_case(case)
    return suite
from test_suite import TestSuite

# Configure one test battery and run it.
# NOTE(review): the meaning of the eight positional parameters is not
# visible here -- see TestSuite.set_test_params for the signature.
ts = TestSuite()
# ts.set_test_params(1, 'prince', 1, 2009, 10, 10, 20, 0)
ts.set_test_params(0, 'prince', 1, 2009, 20, 16, 1, 3)
ts.battery()
# ts.battery(1, 'prince_c', 2009, 20, 20, 10, 0)