def test_run_and_analyze_bias_value(self, logger):
    """
    Run a majority-vote experiment on a biased simulated PUF and check
    that a vote count was found which satisfies the overall desired
    stability.
    """
    chain_length = 8
    params = MVParameters(
        n=chain_length,
        k=2,
        challenge_count=2 ** 8,
        seed_instance=0xC0DEBA5E,
        seed_instance_noise=0xdeadbeef,
        transformation='id',
        combiner='xor',
        mu=0,
        sigma=1,
        sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(
            chain_length, 1, .5),
        seed_challenges=0xf000,
        desired_stability=0.95,
        overall_desired_stability=0.8,
        minimum_vote_count=1,
        iterations=2,
        bias=0.56,
    )
    experiment = ExperimentMajorityVoteFindVotes(
        progress_log_prefix=logger.logger_name,
        parameters=params,
    )
    experiment.execute(logger.queue, logger.logger_name)
    self.assertGreaterEqual(
        experiment.result.overall_stab,
        experiment.parameters.overall_desired_stability,
        'No vote_count was found.',
    )
def test_run_and_analyze(self, logger):
    """
    Run a majority-vote experiment and check that a vote count was found
    which satisfies the overall desired stability.
    """
    chain_length = 8
    noise_ratio = NoisyLTFArray.sigma_noise_from_random_weights(
        chain_length, 1, .5)
    experiment = ExperimentMajorityVoteFindVotes(
        log_name=logger.logger_name,
        n=chain_length,
        k=2,
        challenge_count=2 ** 8,
        seed_instance=0xC0DEBA5E,
        seed_instance_noise=0xdeadbeef,
        transformation=LTFArray.transform_id,
        combiner=LTFArray.combiner_xor,
        mu=0,
        sigma=1,
        sigma_noise_ratio=noise_ratio,
        seed_challenges=0xf000,
        desired_stability=0.95,
        overall_desired_stability=0.8,
        minimum_vote_count=1,
        iterations=2,
        bias=None,
    )
    experiment.execute(logger.queue, logger.logger_name)
    self.assertGreaterEqual(
        experiment.result_overall_stab,
        experiment.overall_desired_stability,
        'No vote_count was found.',
    )
def test_mv_experiments(self):
    """Queue five majority-vote experiments and run them with an Experimenter."""
    experimenter = Experimenter(LOG_PATH + 'test_mv_experiments')
    chain_length = 8
    for index in range(5):
        progress_log = LOG_PATH + 'test_mv_exp{0}'.format(index)
        experimenter.queue(ExperimentMajorityVoteFindVotes(
            progress_log_prefix=progress_log,
            parameters=MVParameters(
                n=chain_length,
                k=2,
                challenge_count=2 ** 8,
                seed_instance=0xC0DEBA5E,
                seed_instance_noise=0xdeadbeef,
                transformation='id',
                combiner='xor',
                mu=0,
                sigma=1,
                sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(
                    chain_length, 1, .5),
                # Vary the challenge seed per experiment.
                seed_challenges=0xf000 + index,
                desired_stability=0.95,
                overall_desired_stability=0.8,
                minimum_vote_count=1,
                iterations=2,
                bias=None,
            ),
        ))
    experimenter.run()
def experiments(self):
    """
    Build the list of majority-vote experiments to run: one experiment
    per (restart, number-of-chains) combination.
    """
    experiment_list = []
    for restart in range(self.RESTARTS):
        for num_chains in range(self.K_RANGE, self.K_MAX + 1, self.K_RANGE):
            parameters = Parameters(
                n=self.LOWERCASE_N,
                k=num_chains,
                challenge_count=self.UPPERCASE_N,
                # Offset the seeds per restart for independent repetitions.
                seed_instance=0xC0DEBA5E + restart,
                seed_instance_noise=0xdeadbeef + restart,
                transformation='id',
                combiner='xor',
                mu=0,
                sigma=1,
                sigma_noise_ratio=self.S_RATIO,
                seed_challenges=self.SEED_CHALLENGES + restart,
                desired_stability=self.STAB_C,
                overall_desired_stability=self.STAB_ALL,
                minimum_vote_count=1,
                iterations=self.ITERATIONS,
                bias=None,
            )
            experiment_list.append(ExperimentMajorityVoteFindVotes(
                progress_log_prefix=None,
                parameters=parameters,
            ))
    return experiment_list
def test_mv_experiments(self):
    """Run five majority-vote experiments through the Experimenter."""
    chain_length = 8
    experiment_list = []
    for index in range(5):
        experiment_list.append(ExperimentMajorityVoteFindVotes(
            log_name='test_mv_exp{0}'.format(index),
            n=chain_length,
            k=2,
            challenge_count=2 ** 8,
            seed_instance=0xC0DEBA5E,
            seed_instance_noise=0xdeadbeef,
            transformation=LTFArray.transform_id,
            combiner=LTFArray.combiner_xor,
            mu=0,
            sigma=1,
            sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(
                chain_length, 1, .5),
            # Vary the challenge seed per experiment.
            seed_challenges=0xf000 + index,
            desired_stability=0.95,
            overall_desired_stability=0.8,
            minimum_vote_count=1,
            iterations=2,
            bias=None,
        ))
    Experimenter('test_mv_experimenter', experiment_list).run()
def test_multiprocessing_logs(self):
    """
    Queue 28 logistic-regression and 28 majority-vote experiments, run
    them in parallel, and check that every per-experiment log is
    non-empty and the main result log holds one line per experiment.
    """
    experiments = []
    experiment_count = 28  # number of experiments of each kind
    chain_length = 8       # bits per Arbiter chain of the simulated PUFs
    for i in range(experiment_count):
        log_name = 'test_multiprocessing_logs{0}'.format(i)
        experiments.append(ExperimentLogisticRegression(
            log_name, 8, 2, 2 ** 8, 0xbeef, 0xbeef,
            LTFArray.transform_id, LTFArray.combiner_xor))
    for i in range(experiment_count):
        log_name = 'test_multiprocessing_logs{0}'.format(i)
        experiments.append(ExperimentMajorityVoteFindVotes(
            log_name=log_name,
            n=chain_length,
            k=2,
            challenge_count=2 ** 8,
            seed_instance=0xC0DEBA5E,
            seed_instance_noise=0xdeadbeef,
            transformation=LTFArray.transform_id,
            combiner=LTFArray.combiner_xor,
            mu=0,
            sigma=1,
            # BUG FIX: this ratio was previously computed from the loop
            # bound (28, the experiment count) instead of the chain
            # length 8 the experiment actually uses — every sibling test
            # derives it from the chain length.
            sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(
                chain_length, 1, .5),
            seed_challenges=0xf000 + i,
            desired_stability=0.95,
            overall_desired_stability=0.8,
            minimum_vote_count=1,
            iterations=2,
            bias=False,
        ))
    experimenter = Experimenter('test_multiprocessing_logs', experiments)
    experimenter.run()

    def line_count(file_object):
        """Return the number of lines readable from an open text file."""
        count = 0
        while file_object.readline() != '':
            count += 1
        return count

    # Every per-experiment log must contain at least one line.
    for log_path in glob.glob('*.log'):
        # `with` guarantees the handle is closed even on assertion failure.
        with open(log_path, 'r') as exp_log_file:
            self.assertGreater(line_count(exp_log_file), 0,
                               'The experiment log is empty.')

    # The main result log must hold one line per queued experiment.
    with open('test_multiprocessing_logs.log', 'r') as log_file:
        self.assertEqual(line_count(log_file), experiment_count * 2,
                         'Unexpected number of results')
def test_run_and_analyze(self):
    """
    Run a majority-vote experiment with its own multiprocessing log
    listener and check that a satisfying vote count was found.
    """
    logger_name = 'log'
    # Spawn the listener process that drains the logging queue.
    queue = multiprocessing.Queue(-1)
    listener = multiprocessing.Process(
        target=log_listener,
        args=(queue, setup_logger, logger_name,))
    listener.start()
    chain_length = 8
    experiment = ExperimentMajorityVoteFindVotes(
        log_name=logger_name,
        n=chain_length,
        k=2,
        challenge_count=2 ** 8,
        seed_instance=0xC0DEBA5E,
        seed_instance_noise=0xdeadbeef,
        transformation=LTFArray.transform_id,
        combiner=LTFArray.combiner_xor,
        mu=0,
        sigma=1,
        sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(
            chain_length, 1, .5),
        seed_challenges=0xf000,
        desired_stability=0.95,
        overall_desired_stability=0.8,
        minimum_vote_count=1,
        iterations=2,
        bias=False,
    )
    experiment.execute(queue, logger_name)
    self.assertGreaterEqual(
        experiment.result_overall_stab,
        experiment.overall_desired_stability,
        'No vote_count was found.',
    )
    # A None sentinel tells the listener to shut down.
    queue.put_nowait(None)
    listener.join()
def main(args):
    """
    Parse command line arguments and run one majority-vote experiment per
    (restart, k) combination to determine the minimum number of votes
    required to reach the desired stability.
    """
    parser = argparse.ArgumentParser(
        usage="Experiment to determine the minimum number of votes "
              "required to achieve a desired given stability.\n")
    parser.add_argument(
        "stab_c",
        help="Desired stability of the challenges.",
        type=float,
        choices=[0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])
    parser.add_argument(
        "stab_all",
        help="Overall desired stability.",
        type=float,
        choices=[0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])
    parser.add_argument("n",
                        help="Number of bits per Arbiter chain.",
                        type=int,
                        choices=[8, 16, 24, 32, 48, 64, 128])
    parser.add_argument("k_max",
                        help="Maximum number of Arbiter chains.",
                        type=int)
    parser.add_argument(
        "k_range",
        help="Number of step size between the number of Arbiter chains",
        type=int,
        choices=range(1, 33))
    parser.add_argument(
        "s_ratio",
        help="Ratio of standard deviation of the noise and weights",
        type=float)
    parser.add_argument("N",
                        help="Number of challenges to evaluate",
                        type=int,
                        choices=range(10, 10001, 10))
    parser.add_argument("restarts",
                        help="Number of restarts to the entire process",
                        type=int)
    args = parser.parse_args(args)

    if args.k_max <= 0:
        # BUG FIX: the guard also rejects zero, so the message must not
        # claim the value was negative; also terminate with a newline.
        stderr.write("The maximum number of Arbiter chains must be positive.\n")
        quit(1)

    seed_challenges = 0xf000
    iterations = 10
    n = args.n
    N = args.N

    # Perform the search for the minimum number of votes required for each k.
    experiments = []
    for i in range(args.restarts):
        for k in range(args.k_range, args.k_max + 1, args.k_range):
            log_name = 'exp{0}'.format(k)
            exp = ExperimentMajorityVoteFindVotes(
                log_name=log_name,
                n=n,
                k=k,
                challenge_count=N,
                # Offset the seeds per restart for independent repetitions.
                seed_instance=0xC0DEBA5E + i,
                seed_instance_noise=0xdeadbeef + i,
                transformation=LTFArray.transform_id,
                combiner=LTFArray.combiner_xor,
                mu=0,
                sigma=1,
                sigma_noise_ratio=args.s_ratio,
                seed_challenges=seed_challenges + i,
                desired_stability=args.stab_c,
                overall_desired_stability=args.stab_all,
                minimum_vote_count=1,
                iterations=iterations,
                bias=False)
            experiments.append(exp)
    experimenter = Experimenter('mv', experiments)
    experimenter.run()
def test_multiprocessing_logs(self):
    """
    Queue logistic-regression and majority-vote experiments, run them in
    parallel, and check that every per-experiment log is non-empty and
    the main result log holds one line per experiment.
    """
    experimenter_log_name = LOG_PATH + 'test_multiprocessing_logs'
    experimenter = Experimenter(experimenter_log_name)
    experiment_count = 4  # number of experiments of each kind
    for i in range(experiment_count):
        log_name = LOG_PATH + 'test_multiprocessing_logs{0}'.format(i)
        experimenter.queue(
            ExperimentLogisticRegression(
                log_name,
                LRParameters(
                    n=8,
                    k=2,
                    N=2**8,
                    seed_challenge=0xbeef,
                    seed_instance=0xbeef,
                    seed_distance=0xf00,
                    seed_model=0x1,
                    transformation='id',
                    combiner='xor',
                    convergence_decimals=2,
                    mini_batch_size=0,
                    shuffle=False,
                )))
    for i in range(experiment_count):
        log_name = LOG_PATH + 'test_multiprocessing_logs{0}'.format(i)
        experimenter.queue(ExperimentMajorityVoteFindVotes(
            progress_log_prefix=log_name,
            parameters=MVParameters(
                n=8,
                k=2,
                challenge_count=2**8,
                seed_instance=0xC0DEBA5E,
                seed_instance_noise=0xdeadbeef,
                transformation='id',
                combiner='xor',
                mu=0,
                sigma=1,
                # BUG FIX: this ratio was previously computed from the
                # experiment count (4) instead of the chain length n=8 —
                # every sibling test derives it from the chain length.
                sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(
                    8, 1, .5),
                seed_challenges=0xf000 + i,
                desired_stability=0.95,
                overall_desired_stability=0.6,
                minimum_vote_count=1,
                iterations=2,
                bias=None,
            )))
    experimenter.run()

    def line_count(file_object):
        """Return the number of lines readable from an open text file."""
        count = 0
        while file_object.readline() != '':
            count += 1
        return count

    # Every per-experiment log must contain at least one line.
    for log_path in glob.glob('logs/' + LOG_PATH + '*.log'):
        with open(log_path, 'r') as exp_log_file:
            self.assertGreater(
                line_count(exp_log_file), 0,
                'The experiment log {} is empty.'.format(log_path))

    # The main result log must hold one line per queued experiment.
    with open('logs/' + experimenter_log_name + '.log', 'r') as log_file:
        self.assertEqual(line_count(log_file), 2 * experiment_count,
                         'Unexpected number of results')
def main(args):
    """
    Parse command line arguments and queue one majority-vote experiment
    per (restart, k) combination in order to find the minimal number of
    votes required to satisfy the desired stability of challenges.
    """
    parser = argparse.ArgumentParser(
        usage="Experiment to determine the minimum number of votes "
              "required to achieve a desired given stability.\n")
    parser.add_argument(
        "stab_c",
        help="Desired stability of the challenges",
        type=float,
        choices=[0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])
    parser.add_argument(
        "stab_all",
        help="Overall desired stability",
        type=float,
        choices=[0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])
    parser.add_argument("n",
                        help="Number of bits per Arbiter chain",
                        type=int,
                        choices=[8, 16, 24, 32, 48, 64, 128])
    parser.add_argument("k_max",
                        help="Maximum number of Arbiter chains",
                        type=int)
    parser.add_argument(
        "k_range",
        help="Number of step size between the number of Arbiter chains",
        type=int,
        choices=range(1, 33))
    parser.add_argument(
        "s_ratio",
        help="Ratio of standard deviation of the noise and weights",
        type=float)
    parser.add_argument("N",
                        help="Number of challenges to evaluate",
                        type=int)
    parser.add_argument("restarts",
                        help="Number of restarts to the entire process",
                        type=int)
    parser.add_argument("--log_name",
                        help="Path to the main log file.",
                        type=str,
                        default='my_num_of_votes')
    args = parser.parse_args(args)

    if args.k_max <= 0:
        # BUG FIX: the guard also rejects zero, so the message must not
        # claim the value was negative; also terminate with a newline.
        stderr.write("The maximum number of Arbiter chains must be positive.\n")
        quit(1)

    seed_challenges = 0xf000
    iterations = 10
    n = args.n
    N = args.N

    # Perform the search for the minimum number of votes required for each k.
    experimenter = Experimenter(args.log_name)
    for i in range(args.restarts):
        for k in range(args.k_range, args.k_max + 1, args.k_range):
            log_name = args.log_name + '{0}'.format(k)
            experimenter.queue(
                ExperimentMajorityVoteFindVotes(
                    progress_log_prefix=log_name,
                    parameters=Parameters(
                        n=n,
                        k=k,
                        challenge_count=N,
                        # Offset the seeds per restart for independent runs.
                        seed_instance=0xC0DEBA5E + i,
                        seed_instance_noise=0xdeadbeef + i,
                        transformation='id',
                        combiner='xor',
                        mu=0,
                        sigma=1,
                        sigma_noise_ratio=args.s_ratio,
                        seed_challenges=seed_challenges + i,
                        desired_stability=args.stab_c,
                        overall_desired_stability=args.stab_all,
                        minimum_vote_count=1,
                        iterations=iterations,
                        bias=None)))
    experimenter.run()