class TestModule(unittest.TestCase):
    """Unit tests for the Results record / return-code API, backed by a MockRun."""

    def setUp(self):
        # Fresh mock run and Results for every test.
        self.run = MockRun()
        self.results = Results(self.run)

    def testResultsIsEmptyBeforeRecorded(self):
        self.assertEqual([], self.results.results)

    def testValidResultsCanBeRecorded(self):
        self.results.record_result(50, 0.10834)
        # BUG FIX: the original did `self.results = [(50, 0.10834)]`, which
        # rebound the fixture attribute instead of asserting anything.
        self.assertEqual([(50, 0.10834)], self.results.results)

    def testInvalidResultsRaisesError(self):
        # A non-integer first value is rejected ...
        with self.assertRaises(AttributeError):
            self.results.record_result(50.1, 0.1)
        # ... and nothing is recorded.
        self.assertEqual([], self.results.results)

    def testCanBeInitialisedWithResults(self):
        results = [(34234, 32084)]
        self.results = Results(self.run, results)
        self.assertEqual(results, self.results.results)

    def testReturnCodeIsNoneBeforeRecorded(self):
        self.assertIsNone(self.results.return_code)

    def testReturnCodeCanBeRecorded(self):
        self.results.return_code = 1
        self.assertEqual(1, self.results.return_code)

    def testCanAccessRun(self):
        self.assertEqual(self.run, self.results.run)
def train_classifier(args):
    """Train the Parkinson-detection CNN with 10-fold cross validation.

    :param args: parsed arguments; uses ``exp_name``, ``output`` and
        ``input_data``.
    :return: None. Per-fold logs, weights, the model JSON and validation
        results are written under a timestamped experiment subfolder.
    """
    exp_name = args.exp_name
    # FIX: call now() once — two separate datetime.now() calls could straddle
    # a minute/day boundary and produce an inconsistent folder name.
    now = datetime.datetime.now()
    subfolder = os.path.join(
        args.output,
        exp_name + '_' + now.strftime("%m_%d"),
        now.strftime("%H_%M"))
    file_result_patients = os.path.join(subfolder, 'res_pat.csv')
    file_result_segments = os.path.join(subfolder, 'res_seg.csv')
    model_file = os.path.join(subfolder, "model.json")
    if not os.path.exists(subfolder):
        os.makedirs(subfolder)

    val_results = Results(file_result_segments, file_result_patients)
    datas = Data(args.input_data, 1, 100, pk_level=False)

    for i in range(10):
        lr = 0.001
        # A fresh, untrained model for every fold; its architecture JSON is
        # (re)written each time to the same file.
        model = multiple_cnn1D(datas.X_data.shape[2])
        model_json = model.to_json()
        with open(model_file, "w") as json_file:
            json_file.write(model_json)

        print('fold', str(i))
        datas.separate_fold(i)
        log_filename = os.path.join(subfolder, "training_" + str(i) + ".csv")
        w_filename = os.path.join(subfolder, "weights_" + str(i) + ".hdf5")
        model = train(model, datas, lr, log_filename, w_filename)

        print('Validation !!')
        val_results.validate_patient(model, datas.X_val, datas.y_val,
                                     datas.count_val)
def main():
    """Run the registered benchmarks on the target while tracing scheduler
    and migration events, then build a combined report."""
    target = Target()
    test_results = Results(target)

    # Record which _WA_ scheduler features are enabled on the target.
    test_results.log(
        target.execute('cat /sys/kernel/debug/sched_features | grep _WA_ ',
                       as_root=True))

    # Empty selection means "run every registered test".
    tests_to_run = []
    # tests_to_run = ['SysBenchCpu', 'SysBenchMemory']
    # tests_to_run = ['PerfBenchFutexWake']

    time = 60
    events = [
        'sched:sched_migrate_task',
        'sched:sched_stick_numa',
        'sched:sched_move_numa',
        'sched:sched_swap_numa',
        'migrate:mm_migrate_pages',
    ]

    for test_cls in test_registry.test_registry:
        if not tests_to_run or test_cls(target).__class__.__name__ in tests_to_run:
            instance = test_cls(target, time=time)
            instance.run_event(target, events, test_results)

    res_files = [test_results.store('Res1'), test_results.store('Res2')]
    create_report(res_files, 'Report', 'Test Run')
def run_tests(test_name, target, tests_to_run, iter=0):
    """Run the selected registered tests against ``target`` and store results.

    :param test_name: base name used when storing the result file.
    :param target: device/host the tests execute against.
    :param tests_to_run: iterable of test class names; empty/falsy runs all.
    :param iter: iteration number recorded with the stored results. Kept
        named ``iter`` for backward compatibility with keyword callers,
        even though it shadows the builtin.
    :return: the value of ``Results.store`` (presumably the result file
        path — confirm against Results).
    """
    test_results = Results(target)
    time = 60  # per-test duration passed to each test's constructor
    # NOTE(review): removed the dead local ``event = []`` — nothing read it.
    for test in test_registry.test_registry:
        if not tests_to_run or test(target).__class__.__name__ in tests_to_run:
            t = test(target, time=time)
            t.run(target, test_results)
    return test_results.store(test_name, iteration=iter)
def run_single_test(result_name, test_name, target, res_files, iter=1):
    """Run one named test ``iter`` times and append its stored result file.

    :param result_name: prefix for the stored result name.
    :param test_name: class name of the single test to run.
    :param target: device/host the test executes against.
    :param res_files: list mutated in place — the stored file is appended.
    :param iter: repetition count. Kept named ``iter`` for backward
        compatibility although it shadows the builtin.
    """
    test_results = Results(target)
    test_time = 60  # per-run duration passed to the test's constructor
    # NOTE(review): removed the dead local ``event = []`` — nothing read it.
    for test in test_registry.test_registry:
        if test(target).__class__.__name__ == test_name:
            for _ in range(iter):
                t = test(target, time=test_time)
                time.sleep(1)  # brief settle time between repetitions
                t.run(target, test_results)
    f = test_results.store(result_name + '-' + test_name, iteration=0)
    res_files.append(f)
def run_tests(test_name, target, tests_to_run):
    """Run the selected tests while tracing task migrations and return the
    stored result from Results.store."""
    test_results = Results(target)

    # Log every WA_* scheduler feature token visible on the target.
    sched_features = target.execute(
        'cat /sys/kernel/debug/sched_features | grep WA_ ', as_root=True)
    for feature in sched_features.split(" "):
        if "WA_" in feature:
            test_results.log(feature)

    for test_cls in test_registry.test_registry:
        # Empty selection means "run everything".
        selected = not tests_to_run or \
            test_cls(target).__class__.__name__ in tests_to_run
        if selected:
            test_cls(target).run_event(target, 'sched:sched_migrate_task',
                                       test_results)
    return test_results.store(test_name)
def main():
    """Print the command line of every registered test without running it."""
    target = Target()
    # Constructed as in the runners; Results(target) may have side effects,
    # so the call is kept even though the value is unused here.
    test_results = Results(target)
    for test_cls in test_registry.test_registry:
        instance = test_cls(target, time=60)
        print(instance.command)
def ablation_study(args):
    """Run the feature-ablation study: for each of 8 feature pairs, train
    the CNN with 10-fold cross validation and record the results.

    :param args: parsed arguments; uses ``exp_name``, ``output`` and
        ``input_data``.
    :return: None. Per-feature subfolders receive logs, weights, model
        JSON and validation results.
    """
    features = np.arange(1, 19)
    # FIX: call now() once — two separate datetime.now() calls could straddle
    # a minute/day boundary and produce an inconsistent folder name.
    now = datetime.datetime.now()
    folder = os.path.join(
        args.output,
        args.exp_name + '_' + now.strftime("%m_%d"),
        now.strftime("%H_%M"))
    if not os.path.exists(folder):
        os.makedirs(folder)

    for j in range(1, 9):
        # NOTE(review): removed the dead local ``exp_name`` — nothing read it.
        subfolder = os.path.join(folder, 'feature_' + str(j))
        file_result_patients = os.path.join(subfolder, 'res_pat.csv')
        file_result_segments = os.path.join(subfolder, 'res_seg.csv')
        model_file = os.path.join(subfolder, "model.json")
        if not os.path.exists(subfolder):
            os.makedirs(subfolder)

        # Feature index j and its counterpart at j + 9 are dropped for this run.
        features_i = np.delete(features, [j, j + 9])
        # NOTE(review): features_i is currently unused — the Datas(...) call
        # that consumed it is commented out below, so every iteration trains
        # on the full feature set. Confirm whether Data should take features_i.
        val_results = Results(file_result_segments, file_result_patients)
        # datas = Datas(args.input_data, 1, 100, features=features_i)
        # datas.load(norm=None)
        datas = Data(args.input_data, 1, 100, pk_level=False)

        for i in range(10):
            lr = 0.001
            print('fold', str(i))
            log_filename = os.path.join(subfolder, "training_" + str(i) + ".csv")
            w_filename = os.path.join(subfolder, "weights_" + str(i) + ".hdf5")
            datas.separate_fold(i)

            # Fresh model each fold; architecture JSON rewritten to the same file.
            model = multiple_cnn1D(datas.X_data.shape[2])
            model_json = model.to_json()
            with open(model_file, "w") as json_file:
                json_file.write(model_json)

            model = train(model, datas, lr, log_filename, w_filename)
            print('Validation !!!!!!!')
            val_results.validate_patient(model, datas.X_val, datas.y_val,
                                         datas.count_val)
def main():
    """Evaluate SVM, fastText and Flair classifiers on four document
    variants (full text, 10% of lines, 10 lines, 1 line) and save the
    collected results to results.json."""
    data = get_data()
    amending, not_amending = split_data(data)
    data = remove_titles(data)
    test, train, validation = split_test_train_validation(set(data.keys()))

    # Label 1 = amending document, 0 = everything else.
    labels = {key: 1 if key in amending else 0 for key in data.keys()}

    # The four input variants every classifier family is evaluated on.
    full_text_documents = data
    tenth_of_text_documents = {
        key: get_random_lines(text, 10, percentage=True)
        for key, text in data.items()
    }
    ten_lines_documents = {
        key: get_random_lines(text, 10) for key, text in data.items()
    }
    single_line_documents = {
        key: get_random_lines(text, 1) for key, text in data.items()
    }

    r = Results()
    variants = (full_text_documents, tenth_of_text_documents,
                ten_lines_documents, single_line_documents)
    # Same call order as before: SVM, then fastText, then Flair.
    for classify in (svm_classify, fasttext_classify, flair_classify):
        classify(*variants, labels, r, test, train, validation)
    r.save('results.json')
def train_model(environment, agent, iterations=10000, goal_reward=200,
                verbose=False, seed=0):
    """Train ``agent`` in ``environment`` for up to ``iterations`` episodes,
    then run five rendered demonstration episodes and return the Results.

    Note: reaching ``goal_reward`` only prints a message — training continues
    for the full iteration budget (original behavior, preserved).
    """
    # Seed every RNG source up front for reproducibility.
    environment.environment.seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    result = Results()
    for i in range(iterations):
        # Periodic progress report, skipping the very first episodes.
        if verbose and i % 200 == 0 and i > 1:
            result.print_summary()

        episode_reward = run_episode(agent, environment)
        hit_goal = episode_reward >= goal_reward
        result.add_result(episode_reward, hit_goal)
        if hit_goal:
            print(
                f"Hit goal in {i} iterations with a reward of {episode_reward}!"
            )

    if verbose:
        input("Finished training, press here to continue...")

    # Five rendered demonstration episodes, recorded without a goal flag.
    for _ in range(5):
        episode_reward = run_episode(agent, environment, render=True)
        result.add_result(episode_reward)

    result.print_summary()
    agent.close()
    return result
def showResults(self, queryResults, sql, values):
    """Open a Results window for the given query output and give it focus."""
    results_window = Results(self, queryResults, sql, values)
    results_window.focus()
def testCanBeInitialisedWithResults(self):
    """A Results constructed with seed data exposes that data unchanged."""
    seed_results = [(34234, 32084)]
    self.results = Results(self.run, seed_results)
    self.assertEqual(seed_results, self.results.results)
def setUp(self):
    """Create a fresh MockRun-backed Results fixture for every test."""
    mock_run = MockRun()
    self.run = mock_run
    self.results = Results(mock_run)
def setUp(self):
    """Build a Results fixture over a 20-point logarithmic probability sweep."""
    sweep = Parameters(1)
    sweep.p = np.logspace(-6, -4, num=20)
    self.res = Results(sweep, "test_figs/")
class ResultsTest(unittest.TestCase):
    """Tests for the Results accessors, result storage and plotting."""

    def setUp(self):
        # 20-point logarithmic probability sweep backing every test.
        param = Parameters(1)
        param.p = np.logspace(-6, -4, num=20)
        figs_dir = "test_figs/"
        self.res = Results(param, figs_dir)

    def test_get_param(self):
        par = self.res.get_param()
        self.assertEqual(20, len(par.p))

    def test_get_figs_dir(self):
        f_dir = self.res.get_figs_dir()
        self.assertEqual("test_figs/", f_dir)

    def test_get_per_list(self):
        # All result lists start empty.
        self.assertEqual(0, len(self.res.get_per_list()))

    def test_get_per_conf(self):
        self.assertEqual(0, len(self.res.get_per_conf()))

    def test_get_thrpt_list(self):
        self.assertEqual(0, len(self.res.get_thrpt_list()))

    def test_get_thrpt_conf(self):
        self.assertEqual(0, len(self.res.get_thrpt_conf()))

    def test_store_res_plot(self):
        per_tpl = (1.0e-05, 1.0e-06)
        thrpt_tpl = (50, 0.5)
        # Store one (PER, throughput) pair per sweep point.
        len_p = len(self.res.get_param().p)
        for _ in range(len_p):  # idiom: unused index, no explicit 0 start
            self.res.store_res(per_tpl, thrpt_tpl)
        # Every stored list and confidence vector has one entry per point.
        self.assertEqual(len_p, len(self.res.get_per_list()))
        self.assertEqual(len_p, len(self.res.get_per_conf()))
        self.assertEqual(len_p, len(self.res.get_thrpt_list()))
        self.assertEqual(len_p, len(self.res.get_thrpt_conf()))
        # Plot against flat theoretical curves (np.ones(n) over np.ones([n,])).
        theo_per = 1.0e-05 * np.ones(len_p)
        theo_thrpt = 50 * np.ones(len_p)
        self.res.plot(theo_per, theo_thrpt)
if __name__ == "__main__": """ configuration: all the config data attacks: configure attacks from config data hashcat: calls to hashcat individual attacks results: generates a report with the results """ os.system("") # enable command colors print(Color.cyan("\n...AutoCrackeo...")) arguments = get_arguments() conf = Configuration(arguments.hash_file, arguments.hash_type, arguments.config_file, arguments.extra_params) results = Results(conf.static_values) hashcat = Hashcat(conf.static_values, results) attacks = Attacks(hashcat) if arguments.config_file: # if -c/--config """ Execute a specific selection of hashcat attacks previously defined on the configuration json file This will be updated gradually as the efficiency of the attacks are measured """ # print start datetime start_date = datetime.now() print("\nStart date: " + Results.datetime_to_string(start_date) + "\n") """ Set a SIGINT signal handler to securely skip all the attacks if the user wants to """