def test_fix_convergence(self, logger):
    """
    Check that an ExperimentLogisticRegression run reproduces a recorded
    result that was generated with exactly the same parameters, i.e. the
    learner still converges to the legacy model.
    """
    n = 8
    k = 2
    N = 255
    seed_instance = 0xBAE55E
    seed_model = 0x5C6AE1E
    seed_challenge = 0xB0661E
    seed_distance = 0xB0C
    experiment = ExperimentLogisticRegression(
        LOG_PATH + 'exp',
        LRParameters(n=n, k=k, N=N, seed_model=seed_model,
                     seed_distance=seed_distance,
                     seed_instance=seed_instance,
                     seed_challenge=seed_challenge,
                     transformation='soelter_lightweight_secure',
                     combiner='xor', mini_batch_size=0, shuffle=False,
                     convergence_decimals=2))
    experiment.execute(logger.queue, logger.logger_name)
    result_str = logger.read_result_log()
    self.assertFalse(result_str == '', 'The result log was empty.')
    error = 'LR learning results deviate from legacy learning results.'
    self.assertEqual(experiment.result.iteration_count, 274, error)
    self.assertEqual(experiment.result.epoch_count, 274, error)
    self.assertEqual(experiment.result.gradient_step_count, 274, error)
    self.assertEqual(experiment.result.accuracy, 0.98828125, error)
    # Removed stray debug `print(experiment.result.model)` -- the model is
    # already checked explicitly below.
    assert_array_equal(
        around(experiment.result.model, decimals=8),
        around([
            0.00351544, -0.00504143, 0.01470355, 0.00481524, 0.00649157,
            -0.00301955, -0.0025765, 0.00841928, 0., 0.03248558,
            0.37524702, -0.0683109, 0.40447738, 0.49995907, 0.52796785,
            0.00060493, -0.40093716, 0.,
        ], decimals=8),
        error)
def test_lr_experiments(self):
    """This method runs the experimenter for five logistic regression experiments."""
    # All five experiments use identical parameters; only the log name differs,
    # so build them in a loop instead of five copy-pasted constructor calls.
    experiments = [
        ExperimentLogisticRegression(
            'test_lr_experiments{}'.format(i), 8, 2, 2**8, 0xbeef, 0xbeef,
            LTFArray.transform_id, LTFArray.combiner_xor)
        for i in range(1, 6)
    ]
    experimenter = Experimenter('log', experiments)
    experimenter.run()
class TestExperimentLogisticRegression(unittest.TestCase):
    """Smoke tests for ExperimentLogisticRegression."""

    def test_run_and_analyze(self):
        """Execute one LR experiment end to end; passing means no exception."""
        experiment = ExperimentLogisticRegression(
            'exp1.log', 8, 2, 2**8, 0xbeef, 0xbeef,
            LTFArray.transform_id, LTFArray.combiner_xor,
        )
        # Keep a reference on the test instance, as the original test did.
        self.lr16_4 = experiment
        experiment.execute()
def test_run_and_analyze(self, logger):
    """Execute a single LR experiment; the test passes when no error occurs."""
    experiment = ExperimentLogisticRegression(
        LOG_PATH + 'exp1',
        8, 2, 2**8,
        0xbeef, 0xbeef,
        LTFArray.transform_id,
        LTFArray.combiner_xor,
    )
    experiment.execute(logger.queue, logger.logger_name)
def test_multiprocessing_logs(self):
    """
    This test checks for the predicted amount for result.
    """
    experiments = []
    n = 28
    for i in range(n):
        log_name = 'test_multiprocessing_logs{0}'.format(i)
        lr16_4_1 = ExperimentLogisticRegression(log_name, 8, 2, 2 ** 8, 0xbeef, 0xbeef,
                                                LTFArray.transform_id,
                                                LTFArray.combiner_xor)
        experiments.append(lr16_4_1)
    for i in range(n):
        log_name = 'test_multiprocessing_logs{0}'.format(i)
        experiment = ExperimentMajorityVoteFindVotes(
            log_name=log_name,
            n=8,
            k=2,
            challenge_count=2 ** 8,
            seed_instance=0xC0DEBA5E,
            seed_instance_noise=0xdeadbeef,
            transformation=LTFArray.transform_id,
            combiner=LTFArray.combiner_xor,
            mu=0,
            sigma=1,
            # NOTE(review): n here is 28 (the experiment count), not the PUF
            # size 8 used above -- looks suspicious, confirm intent.
            sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(n, 1, .5),
            seed_challenges=0xf000 + i,
            desired_stability=0.95,
            overall_desired_stability=0.8,
            minimum_vote_count=1,
            iterations=2,
            bias=False
        )
        experiments.append(experiment)
    experimenter = Experimenter('test_multiprocessing_logs', experiments)
    experimenter.run()

    def line_count(file_object):
        """
        :param file_object: an open text file
        :return: number of lines
        """
        return sum(1 for _ in file_object)

    paths = list(glob.glob('*.log'))
    # Check if the number of lines is greater than zero. `with` guarantees the
    # file handle is closed even when the assertion fails, unlike the previous
    # open()/close() pair.
    for log_path in paths:
        with open(log_path, 'r') as exp_log_file:
            self.assertGreater(line_count(exp_log_file), 0,
                               'The experiment log is empty.')
    # Check if the number of results is correct
    with open('test_multiprocessing_logs.log', 'r') as log_file:
        self.assertEqual(line_count(log_file), n * 2,
                         'Unexpected number of results')
def test_fix_convergence(self, logger):
    """
    Verify that ExperimentLogisticRegression reproduces a recorded reference
    result that was generated with exactly these parameters.
    """
    experiment = ExperimentLogisticRegression(
        LOG_PATH + 'exp',
        LRParameters(
            n=8,
            k=2,
            N=255,
            seed_model=0x5C6AE1E,
            seed_distance=0xB0C,
            seed_instance=0xBAE55E,
            seed_challenge=0xB0661E,
            transformation='soelter_lightweight_secure',
            combiner='xor',
            mini_batch_size=0,
            shuffle=False,
            convergence_decimals=2,
        ),
    )
    experiment.execute(logger.queue, logger.logger_name)
    logged = logger.read_result_log()
    self.assertFalse(logged == '', 'The result log was empty.')
    message = 'LR learning results deviate from legacy learning results.'
    # All three counters are expected to match the legacy value 256.
    for counter in (experiment.result.iteration_count,
                    experiment.result.epoch_count,
                    experiment.result.gradient_step_count):
        self.assertEqual(counter, 256, message)
    self.assertEqual(experiment.result.accuracy, 0.98828125, message)
    expected_model = [
        3.99071615e-03, -5.65532862e-03, 1.63862406e-02, 5.37762262e-03,
        7.29781422e-03, -3.35141930e-03, -2.95642974e-03, 9.40114614e-03,
        1.26572532e-07, 3.49183531e-02, 3.68758330e-01, -7.85028286e-02,
        4.17595994e-01, 5.09973673e-01, 5.13855116e-01, 2.97216086e-04,
        -3.96978992e-01, -5.41390228e-03
    ]
    assert_array_equal(
        around(experiment.result.model, decimals=8),
        around(expected_model, decimals=8),
        message)
def test_fix_convergence(self, logger):
    """
    Check the results of an ExperimentLogisticRegression against a recorded
    legacy result that was generated with the same parameters.
    """
    n = 8
    k = 2
    N = 255
    seed_instance = 0xBAE55E
    seed_model = 0x5C6AE1E
    seed_challenge = 0xB0661E
    seed_distance = 0xB0C
    experiment = ExperimentLogisticRegression(
        LOG_PATH + 'exp',
        n,
        k,
        N,
        seed_instance,
        seed_model,
        LTFArray.transform_soelter_lightweight_secure,
        LTFArray.combiner_xor,
        seed_challenge=seed_challenge,
        seed_chl_distance=seed_distance,
    )
    experiment.execute(logger.queue, logger.logger_name)
    # Recorded reference result fields, tab-separated in the result log.
    legacy_result = ['0xbae55e', '0x5c6ae1e', '0', '8', '2', '255',
                     'transform_soelter_lightweight_secure', 'combiner_xor', '363',
                     '1.000000',
                     '0.00443419669755,-0.00616546911566,0.0186346081194,'
                     '0.0061619719475,0.00795284461334,-0.00443539877583,'
                     '-0.00316047872599,0.00993214368373,0.0507595729459,'
                     '0.415207373134,-0.0517173737839,0.285900582842,'
                     '0.467512016377,0.550102231366,-0.000739711610042,'
                     '-0.467757977178\n']
    result_str = logger.read_result_log()
    self.assertFalse(result_str == '', 'The result log was empty.')
    experiment_result = result_str.split('\t')
    # remove execution time (field 9), which varies between runs
    del experiment_result[9]
    # assertEqual produces a useful diff on failure, unlike assertTrue(a == b);
    # the failure message grammar is also fixed.
    self.assertEqual(experiment_result, legacy_result,
                     'You changed the code significantly.')
def test_run_and_analyze(self, logger):
    """Execute a single LR experiment; the test passes when no error occurs."""
    parameters = LRParameters(
        n=8, k=2, N=2**8,
        seed_model=0xbeef, seed_distance=0xbeef,
        seed_instance=0xdead, seed_challenge=0xdead,
        transformation='id', combiner='xor',
        mini_batch_size=2, shuffle=False, convergence_decimals=2,
    )
    experiment = ExperimentLogisticRegression(LOG_PATH + 'exp1', parameters)
    experiment.execute(logger.queue, logger.logger_name)
def test_lr_experiments(self):
    """Run the experimenter with five identically parameterized LR experiments."""
    # The five experiments differ only in their log file name, so build them
    # in a loop rather than five copy-pasted constructor calls.
    experiments = [
        ExperimentLogisticRegression('exp{}.log'.format(i), 8, 2, 2**8, 0xbeef, 0xbeef,
                                     LTFArray.transform_id, LTFArray.combiner_xor)
        for i in range(1, 6)
    ]
    experimenter = Experimenter('log', experiments)
    experimenter.run()
def test_run_and_analyze(self):
    """Run one LR experiment with a multiprocessing log listener attached."""
    logger_name = 'log'
    # Setup multiprocessing logging
    queue = multiprocessing.Queue(-1)
    listener = multiprocessing.Process(target=log_listener,
                                       args=(queue, setup_logger, logger_name,))
    listener.start()
    try:
        self.lr16_4 = ExperimentLogisticRegression('exp1', 8, 2, 2**8, 0xbeef, 0xbeef,
                                                   LTFArray.transform_id,
                                                   LTFArray.combiner_xor)
        self.lr16_4.execute(queue, logger_name)
    finally:
        # Always send the shutdown sentinel and reap the listener, even when
        # the experiment raises -- otherwise a failing test leaks a process.
        queue.put_nowait(None)
        listener.join()
def get_exp(name, k, trans, comb):
    """Experiment creation shortcut

    :param name: string
                 Name of the experiment
    """
    # n, N and the seed_* values are taken from the enclosing scope.
    experiment = ExperimentLogisticRegression(
        LOG_PATH + name,
        n, k, N,
        seed_instance, seed_model,
        trans, comb,
        seed_challenge=seed_challenge,
        seed_chl_distance=seed_distance,
    )
    return experiment
def get_exp(name, trans, comb):
    """Experiment creation shortcut

    :param name: string
                 Name of the experiment
    """
    # n, k, N and the seed_* values are taken from the enclosing scope.
    parameters = LRParameters(
        n=n, k=k, N=N,
        seed_model=seed_model, seed_distance=seed_distance,
        seed_instance=seed_instance, seed_challenge=seed_challenge,
        transformation=trans, combiner=comb,
        mini_batch_size=0, shuffle=False, convergence_decimals=1,
    )
    return ExperimentLogisticRegression(LOG_PATH + name, parameters)
def test_lr_experiments(self):
    """This method runs the experimenter for five logistic regression experiments."""
    experimenter = Experimenter(LOG_PATH + 'test_lr_experiments')
    # Queue five experiments that differ only in their log name suffix (1..5).
    for number in range(1, 6):
        parameters = LRParameters(
            n=8, k=2, N=2**8,
            seed_model=0xbeef, seed_distance=0xbeef,
            seed_instance=0xdead, seed_challenge=0xdead,
            transformation='id', combiner='xor',
            mini_batch_size=2, shuffle=False, convergence_decimals=2,
        )
        experimenter.queue(ExperimentLogisticRegression(
            LOG_PATH + 'test_lr_experiments{}'.format(number), parameters))
    experimenter.run()
def experiments(self):
    """Assemble the LR and correlation-attack experiments for this study."""
    (n, k) = self.SIZE
    result = []
    # One LR experiment per (transformation, sample) pair.
    for transformation in self.LR_TRANSFORMATIONS:
        for i in range(self.SAMPLE_SIZE):
            result.append(ExperimentLogisticRegression(
                progress_log_prefix=None,
                parameters=LRParameters(
                    seed_instance=314159 + i,
                    seed_model=265358 + i,
                    seed_challenge=979323 + i,
                    seed_distance=846264 + i,
                    n=n,
                    k=k,
                    transformation=transformation,
                    combiner='xor',
                    N=self.CRPS,
                    mini_batch_size=0,
                    convergence_decimals=2,
                    shuffle=False,
                )))
    # One correlation attack per sample, with the same seed scheme.
    for i in range(self.SAMPLE_SIZE):
        result.append(ExperimentCorrelationAttack(
            progress_log_prefix=None,
            parameters=CorrParameters(
                seed_instance=314159 + i,
                seed_model=265358 + i,
                seed_challenge=979323 + i,
                seed_distance=846264 + i,
                n=n,
                k=k,
                N=self.CRPS,
                lr_iteration_limit=1000,
                mini_batch_size=0,
                convergence_decimals=2,
                shuffle=False,
            )))
    return result
def experiments(self):
    """
    Build the grid of LR experiments for the mini-batch study and register one
    success-rate plot per (n, k, shuffle) combination in self.result_plots.
    """
    experiments = []
    for (n, k, training_set_sizes) in self.DEFINITIONS:
        for shuffle in [True, False]:
            filename = 'figures/lr-minibatch-' + \
                ('shuffle' if shuffle else 'noshuffle') + '-success_rate-%i-%i.pdf' % (n, k)
            plot = SuccessRatePlot(
                filename=filename,
                group_by='mini_batch_size',
                group_labels=self.GROUP_LABELS,
            )
            self.result_plots.append(plot)
            for training_set_size in training_set_sizes:
                for i in range(self.SAMPLES_PER_POINT):
                    # None stands for "no mini-batching" (plain gradient descent).
                    for mini_batch_size in [None] + self.MINI_BATCH_SIZES[k]:
                        # NOTE(review): `break` (not `continue`) skips all
                        # remaining sizes once one reaches the training set
                        # size -- this assumes MINI_BATCH_SIZES[k] is sorted
                        # ascending; confirm.
                        if mini_batch_size and mini_batch_size >= training_set_size:
                            break
                        e = ExperimentLogisticRegression(
                            progress_log_prefix=None,
                            parameters=Parameters(
                                n=n,
                                k=k,
                                N=training_set_size,
                                seed_instance=314159 + i,
                                seed_model=265358 + i,
                                transformation='id',
                                combiner='xor',
                                seed_challenge=979323 + i,
                                seed_distance=846264 + i,
                                # 0 encodes "no mini-batches" to the experiment.
                                mini_batch_size=mini_batch_size or 0,
                                convergence_decimals=1.5 if not mini_batch_size else 2.7,
                                shuffle=False if mini_batch_size is None else shuffle,
                            ))
                        experiments.append(e)
                        # Register the experiment with this plot's data set.
                        plot.experiment_hashes.append(e.hash)
    return experiments
def experiments(self):
    """Create SAMPLES_PER_POINT experiments per transformation and training set size."""
    result = []
    for idx, transformation in self.INPUT_TRANSFORMATIONS.items():
        for training_set_size in self.TRAINING_SET_SIZES[idx]:
            for i in range(self.SAMPLES_PER_POINT):
                parameters = Parameters(
                    n=64,
                    k=4,
                    N=training_set_size,
                    seed_instance=314159 + i,
                    seed_model=265358 + i,
                    transformation=transformation,
                    combiner='xor',
                    seed_challenge=979323 + i,
                    seed_distance=846264 + i,
                    convergence_decimals=2,
                    mini_batch_size=0,
                    shuffle=False,
                )
                result.append(ExperimentLogisticRegression(
                    progress_log_prefix=None,
                    parameters=parameters,
                ))
    return result
def experiments(self):
    """Create correlation-attack and LR experiments for every parameter set and sample."""
    result = []
    for sample in range(self.SAMPLES_PER_ENTRY):
        for p in self.PARAMETERS:
            # Parameters shared by the correlation attack and all LR runs.
            shared = {
                'n': p.n,
                'k': p.k,
                'N': p.N,
                'seed_instance': 314159 + sample,
                'seed_model': 265358 + sample,
                'seed_challenge': 979323 + sample,
                'seed_distance': 846264 + sample,
                'mini_batch_size': 0,
                'convergence_decimals': 2,
                'shuffle': False
            }
            result.append(ExperimentCorrelationAttack(
                progress_log_prefix=self.name(),
                parameters=CorrelationAttackParameters(
                    **shared,
                    lr_iteration_limit=1000,
                )
            ))
            for transform in self.TRANSFORMATIONS:
                result.append(ExperimentLogisticRegression(
                    progress_log_prefix=self.name(),
                    parameters=LogisticRegressionParameters(
                        **shared,
                        transformation=transform.__name__,
                        combiner=LTFArray.combiner_xor.__name__,
                    )
                ))
    return result
def main(args):
    """
    This method includes the main functionality of the module it parses the
    argument vector and executes the learning attempts on the PUF instances.
    """
    if len(args) < 10 or len(args) > 11:
        stderr.write('LTF Array Simulator and Logistic Regression Learner\n')
        stderr.write('Usage:\n')
        stderr.write('sim_learn.py n k transformation combiner N restarts seed_instance seed_model [log_name]\n')
        stderr.write(' n: number of bits per Arbiter chain\n')
        stderr.write(' k: number of Arbiter chains\n')
        stderr.write(' transformation: used to transform input before it is used in LTFs\n')
        stderr.write(' currently available:\n')
        stderr.write(' - id -- does nothing at all\n')
        stderr.write(' - atf -- convert according to "natural" Arbiter chain\n')
        stderr.write(' implementation\n')
        stderr.write(' - mm -- designed to achieve maximum PTF expansion length\n')
        stderr.write(' only implemented for k=2 and even n\n')
        stderr.write(' - lightweight_secure -- design by Majzoobi et al. 2008\n')
        stderr.write(' only implemented for even n\n')
        stderr.write(' - shift_lightweight_secure -- design like Majzoobi\n')
        stderr.write(' et al. 2008, but with the shift\n')
        stderr.write(' operation executed first\n')
        stderr.write(' only implemented for even n\n')
        stderr.write(' - soelter_lightweight_secure -- design like Majzoobi\n')
        stderr.write(' et al. 2008, but one bit different\n')
        stderr.write(' only implemented for even n\n')
        stderr.write(' - 1_n_bent -- one LTF gets "bent" input, the others id\n')
        stderr.write(' - 1_1_bent -- one bit gets "bent" input, the others id,\n')
        stderr.write(' this is proven to have maximum PTF\n')
        stderr.write(' length for the model\n')
        stderr.write(' - polynomial -- challenges are interpreted as polynomials\n')
        stderr.write(' from GF(2^64). From the initial challenge c,\n')
        stderr.write(' the i-th Arbiter chain gets the coefficients \n')
        stderr.write(' of the polynomial c^(i+1) as challenge.\n')
        stderr.write(' For now only challenges with length n=64 are accepted.\n')
        stderr.write(' - permutation_atf -- for each Arbiter chain first a pseudorandom permutation \n')
        stderr.write(' is applied and thereafter the ATF transform.\n')
        stderr.write(' - random -- Each Arbiter chain gets a random challenge derived from the\n')
        stderr.write(' original challenge using a PRNG.\n')
        stderr.write(' combiner: used to combine the output bits to a single bit\n')
        stderr.write(' currently available:\n')
        stderr.write(' - xor -- output the parity of all output bits\n')
        stderr.write(' - ip_mod2 -- output the inner product mod 2 of all output\n')
        stderr.write(' bits (even n only)\n')
        stderr.write(' N: number of challenge response pairs in the training set\n')
        stderr.write(' restarts: number of repeated initializations the learner\n')
        stderr.write(' instances: number of repeated initializations the instance\n')
        stderr.write(' The number total learning attempts is restarts*instances.\n')
        stderr.write(' seed_instance: random seed used for LTF array instance\n')
        stderr.write(' seed_model: random seed used for the model in first learning attempt\n')
        stderr.write(' [log_name]: path to the logfile which contains results from all instances. The tool '
                     'will add a ".log" to log_name. The default path is ./sim_learn.log\n')
        quit(1)

    # Parse positional arguments; seeds are given in hexadecimal.
    n = int(args[1])
    k = int(args[2])
    transformation_name = args[3]
    combiner_name = args[4]
    N = int(args[5])
    restarts = int(args[6])
    instances = int(args[7])
    seed_instance = int(args[8], 16)
    seed_model = int(args[9], 16)

    transformation = None
    combiner = None
    try:
        transformation = getattr(LTFArray, 'transform_%s' % transformation_name)
    except AttributeError:
        stderr.write('Transformation %s unknown or currently not implemented\n' % transformation_name)
        quit(1)  # was quit(): an error path must not exit with status 0
    try:
        combiner = getattr(LTFArray, 'combiner_%s' % combiner_name)
    except AttributeError:
        stderr.write('Combiner %s unknown or currently not implemented\n' % combiner_name)
        quit(1)  # was quit(): an error path must not exit with status 0

    log_name = 'sim_learn'
    if len(args) == 11:
        log_name = args[10]

    stderr.write('Learning %s-bit %s XOR Arbiter PUF with %s CRPs and %s restarts.\n\n' % (n, k, N, restarts))
    stderr.write('Using\n')
    stderr.write(' transformation: %s\n' % transformation)
    stderr.write(' combiner: %s\n' % combiner)
    stderr.write(' instance random seed: 0x%x\n' % seed_instance)
    stderr.write(' model random seed: 0x%x\n' % seed_model)
    stderr.write('\n')

    # create different experiment instances
    experiments = []
    for j in range(instances):
        for start_number in range(restarts):
            l_name = '%s_%i_%i' % (log_name, j, start_number)
            experiment = ExperimentLogisticRegression(
                log_name=l_name,
                n=n,
                k=k,
                N=N,
                seed_instance=seed_instance + j,
                seed_model=seed_model + j + start_number,
                transformation=transformation,
                combiner=combiner
            )
            experiments.append(experiment)

    experimenter = Experimenter(log_name, experiments)
    # run the instances
    experimenter.run()

    # output format
    str_format = '{:<15}\t{:<10}\t{:<8}\t{:<8}\t{:<8}\t{:<8}\t{:<18}\t{:<15}\t{:<6}\t{:<8}\t{:<8}\t{:<8}'
    headline = str_format.format(
        'seed_instance', 'seed_model', 'i', 'n', 'k', 'N', 'trans', 'comb', 'iter',
        'time', 'accuracy', 'model_values\n'
    )
    # print the result headline
    stderr.write(headline)

    # print the results; `with` closes the log file even when a write raises
    with open(log_name + '.log', 'r') as log_file:
        result = log_file.readline()
        while result != '':
            stderr.write(str_format.format(*result.split('\t')))
            result = log_file.readline()
def main(args):
    """
    This method includes the main functionality of the module it parses the
    argument vector and executes the learning attempts on the PUF instances.
    """
    parser = argparse.ArgumentParser(
        prog='sim_learn',
        description="LTF Array Simulator and Logistic Regression Learner",
    )
    parser.add_argument("n", help="number of bits per Arbiter chain", type=int)
    parser.add_argument("k", help="number of Arbiter chains", type=int)
    parser.add_argument(
        "transformation",
        help="used to transform input before it is used in LTFs. Currently available: "
             '"atf,id",'
             '"lightweight_secure",'
             '"permutation_atf",'
             '"polynomial,random",'
             '"shift",'
             '"soelter_lightweight_secure"',
        type=str,
    )
    parser.add_argument(
        'combiner',
        help='used to combine the output bits to a single bit. Currently available: "ip_mod2", "xor"',
        type=str,
    )
    parser.add_argument(
        'N',
        help='number of challenge response pairs in the training set',
        type=int)
    parser.add_argument('restarts',
                        help='number of repeated initializations the learner',
                        type=int)
    parser.add_argument(
        'instances',
        help='number of repeated initializations the instance\n'
             'The number total learning attempts is restarts*instances.',
        type=int,
    )
    parser.add_argument('seed_instance',
                        help='random seed used for LTF array instance',
                        type=str)
    parser.add_argument(
        'seed_model',
        help='random seed used for the model in first learning attempt',
        type=str)
    parser.add_argument(
        '--log_name',
        help='path to the logfile which contains results from all instances. The tool '
             'will add a ".log" to log_name. The default path is ./sim_learn.log',
        default='sim_learn',
        type=str,
    )
    parser.add_argument(
        '--seed_challenges',
        help='random seed used to draw challenges for the training set',
        type=str,
    )
    parser.add_argument('--seed_distance',
                        help='random seed used to calculate the accuracy',
                        type=str)
    args = parser.parse_args(args)

    n = args.n
    k = args.k
    transformation = args.transformation
    combiner = args.combiner
    N = args.N
    restarts = args.restarts
    instances = args.instances
    # Seeds are given on the command line in hexadecimal.
    seed_instance = int(args.seed_instance, 16)
    seed_model = int(args.seed_model, 16)
    seed_challenges = 0x5A551
    if args.seed_challenges is not None:
        seed_challenges = int(args.seed_challenges, 16)
    seed_distance = 0xB055
    if args.seed_distance is not None:
        seed_distance = int(args.seed_distance, 16)

    # Validate that LTFArray actually provides the requested names.
    try:
        getattr(LTFArray, 'transform_%s' % transformation)
    except AttributeError:
        sys.stderr.write('Transformation %s unknown or currently not implemented\n' % transformation)
        quit(1)  # was quit(): an error path must not exit with status 0
    try:
        getattr(LTFArray, 'combiner_%s' % combiner)
    except AttributeError:
        sys.stderr.write('Combiner %s unknown or currently not implemented\n' % combiner)
        quit(1)  # was quit(): an error path must not exit with status 0

    log_name = args.log_name
    sys.stderr.write(
        'Learning %s-bit %s XOR Arbiter PUF with %s CRPs and %s restarts.\n\n' % (n, k, N, restarts))
    sys.stderr.write('Using\n')
    sys.stderr.write(' transformation: %s\n' % transformation)
    sys.stderr.write(' combiner: %s\n' % combiner)
    sys.stderr.write(' instance random seed: 0x%x\n' % seed_instance)
    sys.stderr.write(' model random seed: 0x%x\n' % seed_model)
    sys.stderr.write('\n')

    # create different experiment instances
    experimenter = Experimenter(log_name)
    for j in range(instances):
        for start_number in range(restarts):
            l_name = '%s_%i_%i' % (log_name, j, start_number)
            experiment = ExperimentLogisticRegression(
                progress_log_prefix=l_name,
                parameters=Parameters(
                    n=n,
                    k=k,
                    N=N,
                    seed_instance=seed_instance + j,
                    seed_model=seed_model + j + start_number,
                    transformation=transformation,
                    combiner=combiner,
                    seed_challenge=seed_challenges,
                    seed_distance=seed_distance,
                    convergence_decimals=2,
                    mini_batch_size=0,
                    shuffle=False,
                ))
            experimenter.queue(experiment)
    # run the instances
    experimenter.run()
def test_multiprocessing_logs(self):
    """
    This test checks for the predicted amount for result.
    """
    experimenter_log_name = LOG_PATH + 'test_multiprocessing_logs'
    experimenter = Experimenter(experimenter_log_name)
    n = 4
    for i in range(n):
        log_name = LOG_PATH + 'test_multiprocessing_logs{0}'.format(i)
        experimenter.queue(
            ExperimentLogisticRegression(
                log_name,
                LRParameters(
                    n=8,
                    k=2,
                    N=2**8,
                    seed_challenge=0xbeef,
                    seed_instance=0xbeef,
                    seed_distance=0xf00,
                    seed_model=0x1,
                    transformation='id',
                    combiner='xor',
                    convergence_decimals=2,
                    mini_batch_size=0,
                    shuffle=False,
                )))
    for i in range(n):
        log_name = LOG_PATH + 'test_multiprocessing_logs{0}'.format(i)
        experiment = ExperimentMajorityVoteFindVotes(
            progress_log_prefix=log_name,
            parameters=MVParameters(
                n=8,
                k=2,
                challenge_count=2**8,
                seed_instance=0xC0DEBA5E,
                seed_instance_noise=0xdeadbeef,
                transformation='id',
                combiner='xor',
                mu=0,
                sigma=1,
                # NOTE(review): n here is 4 (the experiment count), not the
                # PUF size 8 used above -- looks suspicious, confirm intent.
                sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(n, 1, .5),
                seed_challenges=0xf000 + i,
                desired_stability=0.95,
                overall_desired_stability=0.6,
                minimum_vote_count=1,
                iterations=2,
                bias=None))
        experimenter.queue(experiment)
    experimenter.run()

    def line_count(file_object):
        """
        :param file_object: an open text file
        :return: number of lines
        """
        return sum(1 for _ in file_object)

    paths = list(glob.glob('logs/' + LOG_PATH + '*.log'))
    # Check if the number of lines is greater than zero. `with` guarantees
    # the handle is closed even when the assertion fails, unlike the previous
    # open()/close() pair.
    for log_path in paths:
        with open(log_path, 'r') as exp_log_file:
            self.assertGreater(
                line_count(exp_log_file), 0,
                'The experiment log {} is empty.'.format(log_path))
    # Check if the number of results is correct
    with open('logs/' + experimenter_log_name + '.log', 'r') as log_file:
        self.assertEqual(line_count(log_file), 2 * n,
                         'Unexpected number of results')