Example #1
 def test_mv_experiments(self):
     """This method runs the experimenter with five ExperimentMajorityVoteFindVotes experiments."""
     experiments = []
     for i in range(5):
         n = 8
         logger_name = 'test_mv_exp{0}'.format(i)
         experiment = ExperimentMajorityVoteFindVotes(
             log_name=logger_name,
             n=n,
             k=2,
             challenge_count=2**8,
             seed_instance=0xC0DEBA5E,
             seed_instance_noise=0xdeadbeef,
             transformation=LTFArray.transform_id,
             combiner=LTFArray.combiner_xor,
             mu=0,
             sigma=1,
             sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(n, 1, .5),
             seed_challenges=0xf000 + i,
             desired_stability=0.95,
             overall_desired_stability=0.8,
             minimum_vote_count=1,
             iterations=2,
             bias=None)
         experiments.append(experiment)
     experimenter = Experimenter('test_mv_experimenter', experiments)
     experimenter.run()
Example #2
 def test_lr_experiments(self):
     """This method runs the experimenter for four logistic regression experiments."""
     lr16_4_1 = ExperimentLogisticRegression('test_lr_experiments1', 8, 2,
                                             2**8, 0xbeef, 0xbeef,
                                             LTFArray.transform_id,
                                             LTFArray.combiner_xor)
     lr16_4_2 = ExperimentLogisticRegression('test_lr_experiments2', 8, 2,
                                             2**8, 0xbeef, 0xbeef,
                                             LTFArray.transform_id,
                                             LTFArray.combiner_xor)
     lr16_4_3 = ExperimentLogisticRegression('test_lr_experiments3', 8, 2,
                                             2**8, 0xbeef, 0xbeef,
                                             LTFArray.transform_id,
                                             LTFArray.combiner_xor)
     lr16_4_4 = ExperimentLogisticRegression('test_lr_experiments4', 8, 2,
                                             2**8, 0xbeef, 0xbeef,
                                             LTFArray.transform_id,
                                             LTFArray.combiner_xor)
     lr16_4_5 = ExperimentLogisticRegression('test_lr_experiments5', 8, 2,
                                             2**8, 0xbeef, 0xbeef,
                                             LTFArray.transform_id,
                                             LTFArray.combiner_xor)
     experiments = [lr16_4_1, lr16_4_2, lr16_4_3, lr16_4_4, lr16_4_5]
     experimenter = Experimenter('log', experiments)
     experimenter.run()
Example #3
    def test_multiprocessing_logs(self):
        """
        This test checks that the experimenter produces the expected number of results.
        """
        experiments = []
        n = 28
        for i in range(n):
            log_name = 'test_multiprocessing_logs{0}'.format(i)
            lr16_4_1 = ExperimentLogisticRegression(log_name, 8, 2, 2 ** 8, 0xbeef, 0xbeef,
                                                    LTFArray.transform_id,
                                                    LTFArray.combiner_xor)
            experiments.append(lr16_4_1)

        for i in range(n):
            log_name = 'test_multiprocessing_logs{0}'.format(i)
            experiment = ExperimentMajorityVoteFindVotes(
                log_name=log_name,
                n=8,
                k=2,
                challenge_count=2 ** 8,
                seed_instance=0xC0DEBA5E,
                seed_instance_noise=0xdeadbeef,
                transformation=LTFArray.transform_id,
                combiner=LTFArray.combiner_xor,
                mu=0,
                sigma=1,
                sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(n, 1, .5),
                seed_challenges=0xf000 + i,
                desired_stability=0.95,
                overall_desired_stability=0.8,
                minimum_vote_count=1,
                iterations=2,
                bias=False
            )
            experiments.append(experiment)

        experimenter = Experimenter('test_multiprocessing_logs', experiments)
        experimenter.run()

        def line_count(file_object):
            """
            :param file_object: open file object whose lines are counted
            :return: number of lines
            """
            count = 0
            while file_object.readline() != '':
                count = count + 1
            return count

        paths = list(glob.glob('*.log'))
        # Check if the number of lines is greater than zero
        for log_path in paths:
            exp_log_file = open(log_path, 'r')
            self.assertGreater(line_count(exp_log_file), 0, 'The experiment log is empty.')
            exp_log_file.close()

        # Check if the number of results is correct
        log_file = open('test_multiprocessing_logs.log', 'r')
        self.assertEqual(line_count(log_file), n*2, 'Unexpected number of results')
        log_file.close()
Example #4
 def test_lr_experiments(self):
     lr16_4_1 = ExperimentLogisticRegression('exp1.log', 8, 2, 2**8, 0xbeef,
                                             0xbeef, LTFArray.transform_id,
                                             LTFArray.combiner_xor)
     lr16_4_2 = ExperimentLogisticRegression('exp2.log', 8, 2, 2**8, 0xbeef,
                                             0xbeef, LTFArray.transform_id,
                                             LTFArray.combiner_xor)
     lr16_4_3 = ExperimentLogisticRegression('exp3.log', 8, 2, 2**8, 0xbeef,
                                             0xbeef, LTFArray.transform_id,
                                             LTFArray.combiner_xor)
     lr16_4_4 = ExperimentLogisticRegression('exp4.log', 8, 2, 2**8, 0xbeef,
                                             0xbeef, LTFArray.transform_id,
                                             LTFArray.combiner_xor)
     lr16_4_5 = ExperimentLogisticRegression('exp5.log', 8, 2, 2**8, 0xbeef,
                                             0xbeef, LTFArray.transform_id,
                                             LTFArray.combiner_xor)
     experiments = [lr16_4_1, lr16_4_2, lr16_4_3, lr16_4_4, lr16_4_5]
     experimenter = Experimenter('log', experiments)
     experimenter.run()
Example #5
 def test_mv_experiments(self):
     """This method runs the experimenter with five ExperimentMajorityVoteFindVotes experiments."""
     experimenter = Experimenter(LOG_PATH + 'test_mv_experiments')
     for i in range(5):
         n = 8
         logger_name = LOG_PATH + 'test_mv_exp{0}'.format(i)
         experiment = ExperimentMajorityVoteFindVotes(
             progress_log_prefix=logger_name,
             parameters=MVParameters(n=n,
                                     k=2,
                                     challenge_count=2**8,
                                     seed_instance=0xC0DEBA5E,
                                     seed_instance_noise=0xdeadbeef,
                                     transformation='id',
                                     combiner='xor',
                                     mu=0,
                                     sigma=1,
                                     sigma_noise_ratio=NoisyLTFArray.
                                     sigma_noise_from_random_weights(
                                         n, 1, .5),
                                     seed_challenges=0xf000 + i,
                                     desired_stability=0.95,
                                     overall_desired_stability=0.8,
                                     minimum_vote_count=1,
                                     iterations=2,
                                     bias=None))
         experimenter.queue(experiment)
     experimenter.run()
Example #6
    def test_file_handle(self):
        """
        This test checks whether process file handles are closed. Some systems have a limit on open file handles.
        """
        class ExperimentDummy(Experiment):
            """
            This is an empty experiment class which can be used to run a huge amount of experiments with an
            experimenter.
            """
            def run(self):
                pass

            def analyze(self):
                pass

        experiments = []
        n = 1024
        for i in range(n):
            log_name = 'fail{0}'.format(i)
            experiments.append(ExperimentDummy(log_name))

        experimenter = Experimenter('fail', experiments)
        experimenter.run()
Example #7
    def test_file_handle(self):
        """
        This test checks whether process file handles are closed. Some systems have a limit on open file handles.
        """
        class ExperimentDummy(Experiment):
            def __init__(self, log_name):
                super().__init__(log_name)

            def run(self):
                pass

            def analyze(self):
                pass

        experiments = []
        n = 1024
        for i in range(n):
            log_name = 'fail{0}'.format(i)
            experiments.append(ExperimentDummy(log_name))

        experimenter = Experimenter('fail', experiments)
        experimenter.run()
Example #8
 def test_broken_experiment(self):
     """
     Verify the experimenter handles experiments that raise exceptions correctly.
     """
     experimenter = Experimenter(LOG_PATH + 'test_broken_experiments')
     experimenter.queue(ExperimentBroken('foobar', {}))
     experimenter.queue(ExperimentBroken('foobaz', {}))
     with self.assertRaises(FailedExperimentsException):
         experimenter.run()
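ExperimentBroken is not defined in this listing; as a hedged sketch (class name and exception chosen here for illustration only, assuming the Experiment base class from the earlier examples is in scope), an always-failing experiment could look like this:

class ExperimentAlwaysFails(Experiment):
    """Hypothetical stand-in for ExperimentBroken: run() always raises."""

    def run(self):
        # Raising here makes the Experimenter record this experiment as failed.
        raise RuntimeError('intentionally broken experiment')

    def analyze(self):
        pass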
Example #9
 def test_lr_experiments(self):
     """This method runs the experimenter for five logistic regression experiments."""
     experimenter = Experimenter(LOG_PATH + 'test_lr_experiments')
     for i in range(5):
         experimenter.queue(
             ExperimentLogisticRegression(
                 LOG_PATH + 'test_lr_experiments{}'.format(i + 1),
                 LRParameters(n=8,
                              k=2,
                              N=2**8,
                              seed_model=0xbeef,
                              seed_distance=0xbeef,
                              seed_instance=0xdead,
                              seed_challenge=0xdead,
                              transformation='id',
                              combiner='xor',
                              mini_batch_size=2,
                              shuffle=False,
                              convergence_decimals=2)))
     experimenter.run()
Example #10
def main(args):
    """
    This method contains the main functionality of the module: it parses the argument vector and executes the
    learning attempts on the PUF instances.
    """
    if len(args) < 10 or len(args) > 11:
        stderr.write('LTF Array Simulator and Logistic Regression Learner\n')
        stderr.write('Usage:\n')
        stderr.write('sim_learn.py n k transformation combiner N restarts instances seed_instance seed_model [log_name]\n')
        stderr.write('               n: number of bits per Arbiter chain\n')
        stderr.write('               k: number of Arbiter chains\n')
        stderr.write('  transformation: used to transform input before it is used in LTFs\n')
        stderr.write('                  currently available:\n')
        stderr.write('                  - id  -- does nothing at all\n')
        stderr.write('                  - atf -- convert according to "natural" Arbiter chain\n')
        stderr.write('                           implementation\n')
        stderr.write('                  - mm  -- designed to achieve maximum PTF expansion length\n')
        stderr.write('                           only implemented for k=2 and even n\n')
        stderr.write('                  - lightweight_secure -- design by Majzoobi et al. 2008\n')
        stderr.write('                                          only implemented for even n\n')
        stderr.write('                  - shift_lightweight_secure -- design like Majzoobi\n')
        stderr.write('                                                et al. 2008, but with the shift\n')
        stderr.write('                                                operation executed first\n')
        stderr.write('                                                only implemented for even n\n')
        stderr.write('                  - soelter_lightweight_secure -- design like Majzoobi\n')
        stderr.write('                                                  et al. 2008, but one bit different\n')
        stderr.write('                                                  only implemented for even n\n')
        stderr.write('                  - 1_n_bent -- one LTF gets "bent" input, the others id\n')
        stderr.write('                  - 1_1_bent -- one bit gets "bent" input, the others id,\n')
        stderr.write('                                this is proven to have maximum PTF\n')
        stderr.write('                                length for the model\n')
        stderr.write('                  - polynomial -- challenges are interpreted as polynomials\n')
        stderr.write('                                  from GF(2^64). From the initial challenge c,\n')
        stderr.write('                                  the i-th Arbiter chain gets the coefficients \n')
        stderr.write('                                  of the polynomial c^(i+1) as challenge.\n')
        stderr.write('                                  For now only challenges with length n=64 are accepted.\n')
        stderr.write(
            '                  - permutation_atf -- for each Arbiter chain first a pseudorandom permutation \n')
        stderr.write('                                       is applied and thereafter the ATF transform.\n')
        stderr.write('                  - random -- Each Arbiter chain gets a random challenge derived from the\n')
        stderr.write('                              original challenge using a PRNG.\n')
        stderr.write('        combiner: used to combine the output bits to a single bit\n')
        stderr.write('                  currently available:\n')
        stderr.write('                  - xor     -- output the parity of all output bits\n')
        stderr.write('                  - ip_mod2 -- output the inner product mod 2 of all output\n')
        stderr.write('                               bits (even n only)\n')
        stderr.write('               N: number of challenge response pairs in the training set\n')
        stderr.write('        restarts: number of repeated initializations of the learner\n')
        stderr.write('       instances: number of repeated initializations of the instance\n')
        stderr.write('                  The total number of learning attempts is restarts*instances.\n')
        stderr.write('   seed_instance: random seed used for LTF array instance\n')
        stderr.write('      seed_model: random seed used for the model in first learning attempt\n')
        stderr.write('      [log_name]: path to the logfile which contains results from all instances. The tool '
                     'will add a ".log" to log_name. The default path is ./sim_learn.log\n')
        quit(1)

    n = int(args[1])
    k = int(args[2])
    transformation_name = args[3]
    combiner_name = args[4]
    N = int(args[5])
    restarts = int(args[6])

    instances = int(args[7])

    seed_instance = int(args[8], 16)
    seed_model = int(args[9], 16)

    transformation = None
    combiner = None

    try:
        transformation = getattr(LTFArray, 'transform_%s' % transformation_name)
    except AttributeError:
        stderr.write('Transformation %s unknown or currently not implemented\n' % transformation_name)
        quit()

    try:
        combiner = getattr(LTFArray, 'combiner_%s' % combiner_name)
    except AttributeError:
        stderr.write('Combiner %s unknown or currently not implemented\n' % combiner_name)
        quit()

    log_name = 'sim_learn'
    if len(args) == 11:
        log_name = args[10]

    stderr.write('Learning %s-bit %s XOR Arbiter PUF with %s CRPs and %s restarts.\n\n' % (n, k, N, restarts))
    stderr.write('Using\n')
    stderr.write('  transformation:       %s\n' % transformation)
    stderr.write('  combiner:             %s\n' % combiner)
    stderr.write('  instance random seed: 0x%x\n' % seed_instance)
    stderr.write('  model random seed:    0x%x\n' % seed_model)
    stderr.write('\n')

    # create different experiment instances
    experiments = []
    for j in range(instances):
        for start_number in range(restarts):
            l_name = '%s_%i_%i' % (log_name, j, start_number)
            experiment = ExperimentLogisticRegression(
                log_name=l_name,
                n=n,
                k=k,
                N=N,
                seed_instance=seed_instance + j,
                seed_model=seed_model + j + start_number,
                transformation=transformation,
                combiner=combiner
            )
            experiments.append(experiment)

    experimenter = Experimenter(log_name, experiments)
    # run the instances
    experimenter.run()

    # output format
    str_format = '{:<15}\t{:<10}\t{:<8}\t{:<8}\t{:<8}\t{:<8}\t{:<18}\t{:<15}\t{:<6}\t{:<8}\t{:<8}\t{:<8}'
    headline = str_format.format(
        'seed_instance', 'seed_model', 'i', 'n', 'k', 'N', 'trans', 'comb', 'iter', 'time', 'accuracy',
        'model_values\n'
    )
    # print the result headline
    stderr.write(headline)

    log_file = open(log_name + '.log', 'r')

    # print the results
    result = log_file.readline()
    while result != '':
        stderr.write(str_format.format(*result.split('\t')))
        result = log_file.readline()

    log_file.close()
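For orientation, a hedged invocation sketch of the argv-based main() above; the positional order follows the usage text (n, k, transformation, combiner, N, restarts, instances, seed_instance, seed_model, optional log_name), and the concrete values are arbitrary examples:

# Illustrative call only; every value below is an arbitrary example.
main(['sim_learn.py', '8', '2', 'id', 'xor', '256', '2', '1', '0xdead', '0xbeef'])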
Example #11
def main(args):
    """
    This method contains the main functionality of the module: it parses the argument vector and executes the
    learning attempts on the PUF instances.
    """
    parser = argparse.ArgumentParser(
        prog='sim_learn',
        description="LTF Array Simulator and Logistic Regression Learner",
    )
    parser.add_argument("n", help="number of bits per Arbiter chain", type=int)
    parser.add_argument("k", help="number of Arbiter chains", type=int)
    parser.add_argument(
        "transformation",
        help="used to transform input before it is used in LTFs. Currently available: "
             '"atf", "id", "lightweight_secure", "permutation_atf", "polynomial", "random", '
             '"shift", "soelter_lightweight_secure"',
        type=str,
    )
    parser.add_argument(
        'combiner',
        help=
        'used to combine the output bits to a single bit. Currently available: "ip_mod2", "xor"',
        type=str,
    )
    parser.add_argument(
        'N',
        help='number of challenge response pairs in the training set',
        type=int)
    parser.add_argument('restarts',
                        help='number of repeated initializations of the learner',
                        type=int)
    parser.add_argument(
        'instances',
        help='number of repeated initializations of the instance\n'
        'The total number of learning attempts is restarts*instances.',
        type=int,
    )
    parser.add_argument('seed_instance',
                        help='random seed used for LTF array instance',
                        type=str)
    parser.add_argument(
        'seed_model',
        help='random seed used for the model in first learning attempt',
        type=str)
    parser.add_argument(
        '--log_name',
        help=
        'path to the logfile which contains results from all instances. The tool '
        'will add a ".log" to log_name. The default path is ./sim_learn.log',
        default='sim_learn',
        type=str,
    )
    parser.add_argument(
        '--seed_challenges',
        help='random seed used to draw challenges for the training set',
        type=str,
    )
    parser.add_argument('--seed_distance',
                        help='random seed used to calculate the accuracy',
                        type=str)

    args = parser.parse_args(args)

    n = args.n
    k = args.k
    transformation = args.transformation
    combiner = args.combiner
    N = args.N
    restarts = args.restarts

    instances = args.instances

    seed_instance = int(args.seed_instance, 16)
    seed_model = int(args.seed_model, 16)

    seed_challenges = 0x5A551
    if args.seed_challenges is not None:
        seed_challenges = int(args.seed_challenges, 16)
    seed_distance = 0xB055
    if args.seed_distance is not None:
        seed_distance = int(args.seed_distance, 16)

    try:
        getattr(LTFArray, 'transform_%s' % transformation)
    except AttributeError:
        sys.stderr.write(
            'Transformation %s unknown or currently not implemented\n' %
            transformation)
        quit()

    try:
        getattr(LTFArray, 'combiner_%s' % combiner)
    except AttributeError:
        sys.stderr.write('Combiner %s unknown or currently not implemented\n' %
                         combiner)
        quit()

    log_name = args.log_name

    sys.stderr.write(
        'Learning %s-bit %s XOR Arbiter PUF with %s CRPs and %s restarts.\n\n'
        % (n, k, N, restarts))
    sys.stderr.write('Using\n')
    sys.stderr.write('  transformation:       %s\n' % transformation)
    sys.stderr.write('  combiner:             %s\n' % combiner)
    sys.stderr.write('  instance random seed: 0x%x\n' % seed_instance)
    sys.stderr.write('  model random seed:    0x%x\n' % seed_model)
    sys.stderr.write('\n')

    # create different experiment instances
    experimenter = Experimenter(log_name)
    for j in range(instances):
        for start_number in range(restarts):
            l_name = '%s_%i_%i' % (log_name, j, start_number)
            experiment = ExperimentLogisticRegression(
                progress_log_prefix=l_name,
                parameters=Parameters(
                    n=n,
                    k=k,
                    N=N,
                    seed_instance=seed_instance + j,
                    seed_model=seed_model + j + start_number,
                    transformation=transformation,
                    combiner=combiner,
                    seed_challenge=seed_challenges,
                    seed_distance=seed_distance,
                    convergence_decimals=2,
                    mini_batch_size=0,
                    shuffle=False,
                ))
            experimenter.queue(experiment)

    # run the instances
    experimenter.run()
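A similar hedged sketch for the argparse-based variant above; here main() receives the argument vector without the program name, and the optional flag shown comes from the parser definition (values are again arbitrary examples):

# Illustrative call only; values are arbitrary examples.
main(['8', '2', 'id', 'xor', '256', '2', '1', '0xdead', '0xbeef', '--log_name', 'my_sim_learn'])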
Example #12
def main(args):

    parser = argparse.ArgumentParser(
        usage="Experiment to determine the minimum number of votes "
        "required to achieve a desired given stability.\n")
    parser.add_argument(
        "stab_c",
        help="Desired stability of the challenges.",
        type=float,
        choices=[0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])
    parser.add_argument(
        "stab_all",
        help="Overall desired stability.",
        type=float,
        choices=[0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])
    parser.add_argument("n",
                        help="Number of bits per Arbiter chain.",
                        type=int,
                        choices=[8, 16, 24, 32, 48, 64, 128])
    parser.add_argument("k_max",
                        help="Maximum number of Arbiter chains.",
                        type=int)
    parser.add_argument(
        "k_range",
        help="Number of step size between the number of Arbiter chains",
        type=int,
        choices=range(1, 33))
    parser.add_argument(
        "s_ratio",
        help="Ratio of standard deviation of the noise and weights",
        type=float)
    parser.add_argument("N",
                        help="Number of challenges to evaluate",
                        type=int,
                        choices=range(10, 10001, 10))
    parser.add_argument("restarts",
                        help="Number of restarts to the entire process",
                        type=int)
    args = parser.parse_args(args)

    if args.k_max <= 0:
        stderr.write("Negative maximum number of Arbiter chains")
        quit(1)

    seed_challenges = 0xf000
    iterations = 10
    n = args.n
    N = args.N

    # perform search for minimum number of votes required for each k
    experiments = []
    for i in range(args.restarts):
        for k in range(args.k_range, args.k_max + 1, args.k_range):
            log_name = 'exp{0}'.format(k)
            exp = ExperimentMajorityVoteFindVotes(
                log_name=log_name,
                n=n,
                k=k,
                challenge_count=N,
                seed_instance=0xC0DEBA5E + i,
                seed_instance_noise=0xdeadbeef + i,
                transformation=LTFArray.transform_id,
                combiner=LTFArray.combiner_xor,
                mu=0,
                sigma=1,
                sigma_noise_ratio=args.s_ratio,
                seed_challenges=seed_challenges + i,
                desired_stability=args.stab_c,
                overall_desired_stability=args.stab_all,
                minimum_vote_count=1,
                iterations=iterations,
                bias=False)
            experiments.append(exp)

    experimenter = Experimenter('mv', experiments)
    experimenter.run()
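A hedged invocation sketch for this script; the values below are chosen only to satisfy the declared argparse choices (stab_c, stab_all, n, k_max, k_range, s_ratio, N, restarts):

# Illustrative call only; values satisfy the choices declared above.
main(['0.95', '0.8', '8', '4', '2', '0.33', '100', '2'])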
Example #13
    def test_multiprocessing_logs(self):
        """
        This test checks that the experimenter produces the expected number of results.
        """
        experimenter_log_name = LOG_PATH + 'test_multiprocessing_logs'
        experimenter = Experimenter(experimenter_log_name)

        n = 4
        for i in range(n):
            log_name = LOG_PATH + 'test_multiprocessing_logs{0}'.format(i)
            experimenter.queue(
                ExperimentLogisticRegression(
                    log_name,
                    LRParameters(
                        n=8,
                        k=2,
                        N=2**8,
                        seed_challenge=0xbeef,
                        seed_instance=0xbeef,
                        seed_distance=0xf00,
                        seed_model=0x1,
                        transformation='id',
                        combiner='xor',
                        convergence_decimals=2,
                        mini_batch_size=0,
                        shuffle=False,
                    )))

        for i in range(n):
            log_name = LOG_PATH + 'test_multiprocessing_logs{0}'.format(i)
            experiment = ExperimentMajorityVoteFindVotes(
                progress_log_prefix=log_name,
                parameters=MVParameters(n=8,
                                        k=2,
                                        challenge_count=2**8,
                                        seed_instance=0xC0DEBA5E,
                                        seed_instance_noise=0xdeadbeef,
                                        transformation='id',
                                        combiner='xor',
                                        mu=0,
                                        sigma=1,
                                        sigma_noise_ratio=NoisyLTFArray.
                                        sigma_noise_from_random_weights(
                                            n, 1, .5),
                                        seed_challenges=0xf000 + i,
                                        desired_stability=0.95,
                                        overall_desired_stability=0.6,
                                        minimum_vote_count=1,
                                        iterations=2,
                                        bias=None))
            experimenter.queue(experiment)

        experimenter.run()

        def line_count(file_object):
            """
            :param file_object: open file object whose lines are counted
            :return: number of lines
            """
            count = 0
            while file_object.readline() != '':
                count = count + 1
            return count

        paths = list(glob.glob('logs/' + LOG_PATH + '*.log'))
        # Check if the number of lines is greater than zero
        for log_path in paths:
            exp_log_file = open(log_path, 'r')
            self.assertGreater(
                line_count(exp_log_file), 0,
                'The experiment log {} is empty.'.format(log_path))
            exp_log_file.close()

        # Check if the number of results is correct
        with open('logs/' + experimenter_log_name + '.log', 'r') as log_file:
            self.assertEqual(line_count(log_file), 2 * n,
                             'Unexpected number of results')
Example #14
"""
Disable numpy's multiprocessing as soon as possible when importing the
Experimenter. Note that this code will raise an exception if numpy was
already imported, as deactivating multiprocessing is then impossible.
"""
from pypuf.experiments.experimenter import Experimenter


Experimenter.disable_auto_multiprocessing()
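As a hedged ordering sketch (the surrounding runner script is an assumption, not part of the module above), the point is that the call must come before anything that imports numpy:

# Hypothetical runner script illustrating the required import order.
from pypuf.experiments.experimenter import Experimenter
Experimenter.disable_auto_multiprocessing()  # must run before numpy is imported

import numpy  # importing numpy before the call above would raise an exception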
Example #15
def main(args):
    """
    This method starts several experiments in order to find the minimal number of votes required to satisfy
    the desired stability of the challenges.
    """
    parser = argparse.ArgumentParser(
        usage="Experiment to determine the minimum number of votes "
        "required to achieve a desired given stability.\n")
    parser.add_argument(
        "stab_c",
        help="Desired stability of the challenges",
        type=float,
        choices=[0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])
    parser.add_argument(
        "stab_all",
        help="Overall desired stability",
        type=float,
        choices=[0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])
    parser.add_argument("n",
                        help="Number of bits per Arbiter chain",
                        type=int,
                        choices=[8, 16, 24, 32, 48, 64, 128])
    parser.add_argument("k_max",
                        help="Maximum number of Arbiter chains",
                        type=int)
    parser.add_argument(
        "k_range",
        help="Number of step size between the number of Arbiter chains",
        type=int,
        choices=range(1, 33))
    parser.add_argument(
        "s_ratio",
        help="Ratio of standard deviation of the noise and weights",
        type=float)
    parser.add_argument("N", help="Number of challenges to evaluate", type=int)
    parser.add_argument("restarts",
                        help="Number of restarts to the entire process",
                        type=int)
    parser.add_argument("--log_name",
                        help="Path to the main log file.",
                        type=str,
                        default='my_num_of_votes')
    args = parser.parse_args(args)

    if args.k_max <= 0:
        stderr.write("Negative maximum number of Arbiter chains")
        quit(1)

    seed_challenges = 0xf000
    iterations = 10
    n = args.n
    N = args.N

    # perform search for minimum number of votes required for each k
    experimenter = Experimenter(args.log_name)
    for i in range(args.restarts):
        for k in range(args.k_range, args.k_max + 1, args.k_range):
            log_name = args.log_name + '{0}'.format(k)
            experimenter.queue(
                ExperimentMajorityVoteFindVotes(
                    progress_log_prefix=log_name,
                    parameters=Parameters(
                        n=n,
                        k=k,
                        challenge_count=N,
                        seed_instance=0xC0DEBA5E + i,
                        seed_instance_noise=0xdeadbeef + i,
                        transformation='id',
                        combiner='xor',
                        mu=0,
                        sigma=1,
                        sigma_noise_ratio=args.s_ratio,
                        seed_challenges=seed_challenges + i,
                        desired_stability=args.stab_c,
                        overall_desired_stability=args.stab_all,
                        minimum_vote_count=1,
                        iterations=iterations,
                        bias=None)))

    experimenter.run()