Example #1
 def test_init_normal_empirical(self):
     """
     Test if initialization by intra distance yields the desired intra distance.
     """
     for intra_dist in [.1, .2, .3]:
         nla = NoisyLTFArray.init_normal_empirical(
             32,
             1,
             NoisyLTFArray.transform_id,
             NoisyLTFArray.combiner_xor,
             intra_dist,
             approx_threshold=.01,
             random_instance=RandomState(0xbeef))
         self.assertTrue(
             abs(tools.approx_dist(nla, nla, 10000) - intra_dist) < .02)
     for intra_dist in [.1, .2, .3]:
         nla = NoisyLTFArray.init_normal_empirical(
             64,
             4,
             NoisyLTFArray.transform_id,
             NoisyLTFArray.combiner_xor,
             intra_dist,
             approx_threshold=.1,
             random_instance=RandomState(0xbeef))
         self.assertTrue(
             abs(tools.approx_dist(nla, nla, 10000) - intra_dist) < .15)
Example #2
    def test_reliability(self):
        """This method tests the test_reliability calculation."""
        n = 8
        k = 8
        N = 2**n
        transformation = LTFArray.transform_id
        combiner = LTFArray.combiner_xor
        instance = LTFArray(
            weight_array=LTFArray.normal_weights(
                n=n, k=k, random_instance=RandomState(0xA1A1)),
            transform=transformation,
            combiner=combiner,
        )
        challenges = sample_inputs(n, N, random_instance=RandomState(0xFAB1A))
        reliabilities = []
        for challenge in challenges:
            reliabilities.append(
                PropertyTest.reliability(instance, reshape(challenge, (1, n))))

        # For noiseless simulations the responses are always the same, hence the reliability is 0%
        assert_array_equal(reliabilities, repeat(0.0, N))

        noisy_instance = NoisyLTFArray(
            weight_array=NoisyLTFArray.normal_weights(
                n=n, k=k, random_instance=RandomState(0xA1A1)),
            transform=transformation,
            combiner=combiner,
            sigma_noise=15.0,
            random_instance=RandomState(0x5015E),
        )
        for challenge in challenges:
            reliability = PropertyTest.reliability(noisy_instance,
                                                   reshape(challenge, (1, n)))
            # For noisy simulations the responses should vary
            self.assertNotEqual(reliability, 0.0)
Example #3
    def test_ltf_eval(self):
        """
        Test ltf_eval for correct evaluation of LTFs.
        """

        #random.normal(loc=0, scale=self.sigma_noise, size=(1, self.k))

        weight_prng_1 = RandomState(seed=0xBADA55)
        weight_prng_2 = RandomState(seed=0xBADA55)
        noise_prng_1 = RandomState(seed=0xC0FFEE)
        noise_prng_2 = RandomState(seed=0xC0FFEE)

        N = 100  # number of random inputs per test set

        for test_parameters in self.test_set:
            n = test_parameters[0]
            k = test_parameters[1]
            mu = test_parameters[2]
            sigma = test_parameters[3]

            transformed_inputs = LTFArray.transform_id(
                RandomState(seed=0xBAADA555).choice([-1, +1],
                                                    (N, n)),  # bad ass testing
                k)

            ltf_array = LTFArray(
                weight_array=LTFArray.normal_weights(n, k, mu, sigma,
                                                     weight_prng_1),
                transform=LTFArray.transform_id,
                combiner=LTFArray.combiner_xor,
            )

            noisy_ltf_array = NoisyLTFArray(
                weight_array=LTFArray.normal_weights(
                    n, k, mu, sigma, weight_prng_2
                ),  # weight_prng_2 was seeded identically to weight_prng_1
                transform=LTFArray.transform_id,
                combiner=LTFArray.combiner_xor,
                sigma_noise=1,
                random_instance=noise_prng_1,
            )

            evaled_ltf_array = ltf_array.ltf_eval(transformed_inputs)
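            # Since the weight PRNGs were seeded identically, the noisy array's
            # ltf_eval must equal the noiseless result plus the same noise draw,
            # reproduced here from the identically seeded noise_prng_2.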
            assert_array_equal(
                around(evaled_ltf_array + noise_prng_2.normal(
                    loc=0, scale=1, size=(len(evaled_ltf_array), k)),
                       decimals=10),
                around(noisy_ltf_array.ltf_eval(transformed_inputs),
                       decimals=10))
Example #4
def example_reliability():
    """This method shows how to use the PropertyTest.reliability function."""
    n = 8
    k = 8
    transformation = NoisyLTFArray.transform_id
    combiner = NoisyLTFArray.combiner_xor
    weights = NoisyLTFArray.normal_weights(n=n, k=k)
    instance = NoisyLTFArray(
        weight_array=weights,
        transform=transformation,
        combiner=combiner,
        sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, 0.5))
    challenge = array([-1, 1, 1, 1, -1, 1, 1, 1])
    reliability = PropertyTest.reliability(instance,
                                           reshape(challenge, (1, n)))
    print('The reliability is {}.'.format(reliability))
Example #5
 def test_mv_experiments(self):
     experiments = []
     for i in range(5):
         n = 8
         logger_name = 'test_mv_exp{0}'.format(i)
         experiment = ExperimentMajorityVoteFindVotes(
             log_name=logger_name,
             n=n,
             k=2,
             challenge_count=2 ** 8,
             seed_instance=0xC0DEBA5E,
             seed_instance_noise=0xdeadbeef,
             transformation=LTFArray.transform_id,
             combiner=LTFArray.combiner_xor,
             mu=0,
             sigma=1,
             sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(n, 1, .5),
             seed_challenges=0xf000+i,
             desired_stability=0.95,
             overall_desired_stability=0.8,
             minimum_vote_count=1,
             iterations=2,
             bias=False
         )
         experiments.append(experiment)
     experimenter = Experimenter('test_mv_experimenter', experiments)
     experimenter.run()
Example #6
    def test_multiprocessing_logs(self):
        """
        This test checks that the expected number of results is written to the result log.
        """
        experiments = []
        n = 28
        for i in range(n):
            log_name = 'test_multiprocessing_logs{0}'.format(i)
            lr16_4_1 = ExperimentLogisticRegression(log_name, 8, 2, 2 ** 8, 0xbeef, 0xbeef,
                                                    LTFArray.transform_id,
                                                    LTFArray.combiner_xor)
            experiments.append(lr16_4_1)

        for i in range(n):
            log_name = 'test_multiprocessing_logs{0}'.format(i)
            experiment = ExperimentMajorityVoteFindVotes(
                log_name=log_name,
                n=8,
                k=2,
                challenge_count=2 ** 8,
                seed_instance=0xC0DEBA5E,
                seed_instance_noise=0xdeadbeef,
                transformation=LTFArray.transform_id,
                combiner=LTFArray.combiner_xor,
                mu=0,
                sigma=1,
                sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(n, 1, .5),
                seed_challenges=0xf000 + i,
                desired_stability=0.95,
                overall_desired_stability=0.8,
                minimum_vote_count=1,
                iterations=2,
                bias=False
            )
            experiments.append(experiment)

        experimenter = Experimenter('test_multiprocessing_logs', experiments)
        experimenter.run()

        def line_count(file_object):
            """
            :param file_object: file object opened for reading
            :return: number of lines in the file
            """
            count = 0
            while file_object.readline() != '':
                count = count + 1
            return count

        paths = list(glob.glob('*.log'))
        # Check if the number of lines is greater than zero
        for log_path in paths:
            exp_log_file = open(log_path, 'r')
            self.assertGreater(line_count(exp_log_file), 0, 'The experiment log is empty.')
            exp_log_file.close()

        # Check if the number of results is correct
        log_file = open('test_multiprocessing_logs.log', 'r')
        self.assertEqual(line_count(log_file), n*2, 'Unexpected number of results')
        log_file.close()
Example #7
    def test_run_and_analyze_bias_value(self, logger):
        """
        This method runs the experiment with a bias value and checks whether a vote count was found
        that satisfies the overall desired stability.
        """
        n = 8
        experiment = ExperimentMajorityVoteFindVotes(
            log_name=logger.logger_name,
            n=n,
            k=2,
            challenge_count=2**8,
            seed_instance=0xC0DEBA5E,
            seed_instance_noise=0xdeadbeef,
            transformation=LTFArray.transform_id,
            combiner=LTFArray.combiner_xor,
            mu=0,
            sigma=1,
            sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(
                n, 1, .5),
            seed_challenges=0xf000,
            desired_stability=0.95,
            overall_desired_stability=0.8,
            minimum_vote_count=1,
            iterations=2,
            bias=0.56)

        experiment.execute(logger.queue, logger.logger_name)

        self.assertGreaterEqual(experiment.result_overall_stab,
                                experiment.overall_desired_stability,
                                'No vote_count was found.')
Example #8
    def test_reliability_set(self):
        """This method tests the reliability_statistic calculation."""
        n = 8
        k = 3
        N = 2**n
        measurements = 10
        transformation = LTFArray.transform_id
        combiner = LTFArray.combiner_xor
        instances = []
        instance_count = 3
        for i in range(instance_count):
            instance = LTFArray(
                weight_array=LTFArray.normal_weights(
                    n=n, k=k, random_instance=RandomState(0xA1A1 + i)),
                transform=transformation,
                combiner=combiner,
            )
            instances.append(instance)

        challenges = sample_inputs(n, N, random_instance=RandomState(0xFAB0))

        reliability_set = PropertyTest.reliability_set(
            instances, challenges, measurements=measurements)
        # The result is an array-like with N * instance_count entries.
        self.assertEqual(len(reliability_set), N * instance_count)
        # For noiseless simulations all reliabilities must be 0%
        assert_array_equal(reliability_set, repeat(0.0, N * instance_count))

        noisy_instances = []
        for i in range(instance_count):
            noisy_instance = NoisyLTFArray(
                weight_array=NoisyLTFArray.normal_weights(
                    n=n, k=k, random_instance=RandomState(0xA1A1 + i)),
                transform=transformation,
                combiner=combiner,
                sigma_noise=0.5,
                random_instance=RandomState(0x5015C + i),
            )
            noisy_instances.append(noisy_instance)

        noisy_reliability_set = PropertyTest.reliability_set(
            noisy_instances, challenges, measurements=measurements)
        # For a noisy simulation the mean reliability must differ from zero
        self.assertNotEqual(mean(noisy_reliability_set), 0.0)
Example #9
def example_uniqueness():
    """
    This method shows the function which can be used to calculate the uniqueness of a set of simulation instances.
    """
    n = 8
    k = 1
    instance_count = 3
    transformation = NoisyLTFArray.transform_id
    combiner = NoisyLTFArray.combiner_xor
    weights = NoisyLTFArray.normal_weights(n=n, k=k)
    instances = [
        NoisyLTFArray(
            weight_array=weights,
            transform=transformation,
            combiner=combiner,
            sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(
                n, weights)) for _ in range(instance_count)
    ]
    challenge = array([-1, 1, 1, 1, -1, 1, 1, 1])
    uniqueness = PropertyTest.uniqueness(instances, reshape(challenge, (1, n)))
    print('The uniqueness is {}.'.format(uniqueness))
Example #10
    def test_bias_influence_array(self):
        """
        This method tests the influence of the bias array. The biased and unbiased responses should differ.
        """
        n = 8
        k = 4
        mu = 1
        sigma = 0.5

        challenges = array(list(tools.all_inputs(n)))

        weight_array = NoisyLTFArray.normal_weights(
            n, k, mu=mu, sigma=sigma, random_instance=RandomState(0xBADA556))
        bias_array = NoisyLTFArray.normal_weights(
            1,
            k,
            mu=mu,
            sigma=sigma * 2,
            random_instance=RandomState(0xBADAFF1))

        biased_ltf_array = NoisyLTFArray(
            weight_array=weight_array,
            transform=NoisyLTFArray.transform_id,
            combiner=NoisyLTFArray.combiner_xor,
            sigma_noise=sigma,
            bias=bias_array,
        )
        ltf_array = NoisyLTFArray(
            weight_array=weight_array,
            transform=NoisyLTFArray.transform_id,
            combiner=NoisyLTFArray.combiner_xor,
            sigma_noise=sigma,
            bias=None,
        )
        self.assertEqual(ltf_array.weight_array.shape,
                         biased_ltf_array.weight_array.shape)

        bias_array = biased_ltf_array.weight_array[:, -1]
        bias_array_compared = [
            bias == bias_array[0] for bias in bias_array[1:]
        ]
        # The bias values should be different for this test. They could all be equal by chance,
        # but that chance is low.
        self.assertFalse(array(bias_array_compared).all())

        biased_responses = biased_ltf_array.eval(challenges)
        responses = ltf_array.eval(challenges)

        # The biased and unbiased responses should not be identical.
        self.assertFalse(array_equal(biased_responses, responses))
Example #11
def example_reliability_statistic():
    """This method shows hot to use the PropertyTest.reliability_statistic."""
    n = 8
    k = 1
    N = 2**n
    instance_count = 3
    measurements = 100
    transformation = NoisyLTFArray.transform_id
    combiner = NoisyLTFArray.combiner_xor
    weights = NoisyLTFArray.normal_weights(n=n, k=k)
    instances = [
        NoisyLTFArray(
            weight_array=weights,
            transform=transformation,
            combiner=combiner,
            sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(n, 0.5))
        for _ in range(instance_count)
    ]
    challenges = array(list(sample_inputs(n, N)))
    property_test = PropertyTest(instances)
    reliability_statistic = property_test.reliability_statistic(
        challenges, measurements=measurements)
    print('The reliability statistic is {}.'.format(reliability_statistic))
Example #12
 def __init__(self, n: int, k: int, seed: int = None, transform=None, noisiness: float = 0, noise_seed: int = None):
     random_instance = RandomState(seed=seed) if seed is not None else RandomState()
     super().__init__(
         weight_array=self.normal_weights(n=n, k=k, random_instance=random_instance),
         transform=transform or LTFArray.transform_atf,
         combiner=LTFArray.combiner_xor,
         sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(
             n=n,
             sigma_weight=1,
             noisiness=noisiness,
         ),
         random_instance=RandomState(seed=noise_seed) if noise_seed is not None else RandomState()
     )
     self.n = n
     self.k = k
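
A minimal usage sketch for this constructor follows. The enclosing class is not shown in the snippet above, so XORArbiterPUF is used as a placeholder name, and eval() is assumed to be inherited from the LTFArray base, as in the other examples on this page.

# Minimal sketch, assuming the constructor above belongs to a class here called
# XORArbiterPUF (placeholder name) that inherits eval() from the LTFArray base.
from numpy import array, reshape

puf = XORArbiterPUF(n=8, k=2, seed=0x1234, noisiness=0.1, noise_seed=0x5678)
challenge = array([-1, 1, 1, 1, -1, 1, 1, 1])
print('The response is {}.'.format(puf.eval(reshape(challenge, (1, 8)))))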
Example #13
def example_uniqueness_statistic():
    """This method shows the uniqueness statistic function."""
    n = 8
    k = 1
    N = 2**n
    instance_count = 11
    measurements = 1
    transformation = NoisyLTFArray.transform_id
    combiner = NoisyLTFArray.combiner_xor
    weights = NoisyLTFArray.normal_weights(n=n, k=k)
    instances = [
        NoisyLTFArray(
            weight_array=weights,
            transform=transformation,
            combiner=combiner,
            sigma_noise=NoisyLTFArray.sigma_noise_from_random_weights(
                n, weights)) for _ in range(instance_count)
    ]

    challenges = array(list(sample_inputs(n, N)))
    property_test = PropertyTest(instances)
    uniqueness_statistic = property_test.uniqueness_statistic(
        challenges, measurements=measurements)
    print('The uniqueness statistic is {}.'.format(uniqueness_statistic))
Example #14
 def create_noisy_ltf_arrays(cls, n=8, k=1, instance_count=10, transformation=LTFArray.transform_id,
                             combiner=LTFArray.combiner_xor, bias=None, mu=0, sigma=1, weight_random_seed=0x123,
                             sigma_noise=0.5, noise_random_seed=0x321):
     """
     This function can be used to create a list of NoisyLTFArray.
     :param n: int
               Number of stages of the PUF
     :param k: int
               Number of different LTFArrays
     :param instance_count: int
                            Number of simulations to be instantiated.
     :param transformation: A function: array of int with shape(N,k,n), int number of PUFs k -> shape(N,k,n)
                            The function transforms input challenges in order to increase resistance against attacks.
     :param combiner: A function: array of int with shape(N,k,n) -> array of int with shape(N)
                      The function combines the outputs of k PUFs to one-bit results,
                      in order to increase resistance against attacks.
     :param bias: None, float or a two dimensional array of float with shape (k, 1)
                  This bias value or array of bias values will be appended to the weight_array.
                  Use a single value if you want the same bias for all weight_vectors.
     :param mu: float
                Mean (“centre”) of the stage weight distribution of the PUF instance simulation.
     :param sigma: float
                   Standard deviation of the stage weight distribution of the PUF instance simulation.
     :param weight_random_seed: int
                                The seed which is used to initialize the pseudo-random number generator
                                which is used to generate the stage weights for the arbiter PUF simulation.
     :param sigma_noise: float
                         Standard deviation of the noise distribution.
     :param noise_random_seed: int
                               The seed which is used to initialize the pseudo-random number generator
                               which is used to generate the noise for the arbiter PUF simulation.
     :return: list of pypuf.simulation.arbiter_based.ltfarray.NoisyLTFArray
     """
     instances = []
     for seed_offset in range(instance_count):
         weight_array = LTFArray.normal_weights(n, k, mu, sigma,
                                                random_instance=RandomState(weight_random_seed + seed_offset))
         instances.append(
             NoisyLTFArray(
                 weight_array=weight_array,
                 transform=transformation,
                 combiner=combiner,
                 sigma_noise=sigma_noise,
                 random_instance=RandomState(noise_random_seed + seed_offset),
                 bias=bias,
             )
         )
     return instances
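
A short sketch of how this helper can be combined with PropertyTest, mirroring the statistic examples elsewhere on this page. The class that owns create_noisy_ltf_arrays is not shown above, so Factory is used as a placeholder, and the import paths are assumptions based on the module path quoted in the docstring.

# Sketch: "Factory" stands in for the unnamed class owning create_noisy_ltf_arrays;
# import paths are assumed from the pypuf layout and may need adjusting.
from numpy import array
from numpy.random import RandomState
from pypuf.tools import sample_inputs
from pypuf.property_test.base import PropertyTest

n, k = 8, 1
instances = Factory.create_noisy_ltf_arrays(n=n, k=k, instance_count=10)
challenges = array(list(sample_inputs(n, 2 ** n, random_instance=RandomState(0xFAB))))
property_test = PropertyTest(instances)
uniqueness_statistic = property_test.uniqueness_statistic(challenges, measurements=1)
print('The uniqueness statistic is {}.'.format(uniqueness_statistic))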
Example #15
    def test_reliability_statistic(self):
        """This method tests the reliability statistic of an instance set."""
        n = 8
        k = 1
        N = 2**n
        instance_count = 2
        measurements = 100
        transformation = LTFArray.transform_id
        combiner = LTFArray.combiner_xor

        instances = [
            LTFArray(weight_array=LTFArray.normal_weights(
                n=n, k=k, random_instance=RandomState(0xA1A1 + i)),
                     transform=transformation,
                     combiner=combiner) for i in range(instance_count)
        ]

        challenges = sample_inputs(n, N, random_instance=RandomState(0xFAB10))

        property_test = PropertyTest(instances)
        reliability_statistic = property_test.reliability_statistic(
            challenges, measurements=measurements)
        # For a noiseless set of simulations the reliability must be 0%
        for key, value in reliability_statistic.items():
            if key == 'sv':
                self.assertEqual(value, 0.0, '{}'.format(key))
            elif key == 'samples':
                self.assertEqual(len(value), instance_count * N,
                                 '{}'.format(key))
            else:
                self.assertEqual(value, 0.0, '{}'.format(key))

        noisy_instances = [
            NoisyLTFArray(
                weight_array=LTFArray.normal_weights(
                    n=n, k=k, random_instance=RandomState(0xA1A1 + i)),
                transform=transformation,
                combiner=combiner,
                sigma_noise=0.5,
                random_instance=RandomState(0xCABE),
            ) for i in range(instance_count)
        ]

        noisy_property_test = PropertyTest(noisy_instances)
        noisy_reliability_statistic = noisy_property_test.reliability_statistic(
            challenges, measurements=measurements)
        self.assertNotEqual(noisy_reliability_statistic['mean'], 0.0)
Example #16
    def run(self):
        random = RandomState(seed=self.parameters.seed)
        sigma_noise = NoisyLTFArray.sigma_noise_from_random_weights(
            self.parameters.n, 1, self.parameters.sigma_noise_ratio)
        weights = LTFArray.normal_weights(self.parameters.n,
                                          self.parameters.k,
                                          random_instance=random)
        instance_mv = SimulationMajorityLTFArray(
            weights,
            LTFArray.transform_atf,
            LTFArray.combiner_xor,
            sigma_noise,
            random_instance_noise=random,
            vote_count=self.parameters.vote_count)

        self.stability = approx_stabilities(instance_mv, self.parameters.N,
                                            self.parameters.samples, random)
Example #17
    def test_run_and_analyze(self):
        logger_name = 'log'

        # Setup multiprocessing logging
        queue = multiprocessing.Queue(-1)
        listener = multiprocessing.Process(target=log_listener,
                                           args=(
                                               queue,
                                               setup_logger,
                                               logger_name,
                                           ))
        listener.start()

        n = 8
        experiment = ExperimentMajorityVoteFindVotes(
            log_name=logger_name,
            n=n,
            k=2,
            challenge_count=2**8,
            seed_instance=0xC0DEBA5E,
            seed_instance_noise=0xdeadbeef,
            transformation=LTFArray.transform_id,
            combiner=LTFArray.combiner_xor,
            mu=0,
            sigma=1,
            sigma_noise_ratio=NoisyLTFArray.sigma_noise_from_random_weights(
                n, 1, .5),
            seed_challenges=0xf000,
            desired_stability=0.95,
            overall_desired_stability=0.8,
            minimum_vote_count=1,
            iterations=2,
            bias=False)
        experiment.execute(queue, logger_name)

        self.assertGreaterEqual(experiment.result_overall_stab,
                                experiment.overall_desired_stability,
                                'No vote_count was found.')
        queue.put_nowait(None)
        listener.join()
Example #18
def stability_figure_data(n, k, vote_count, sigma_noise_ratio, num, reps, random):
    """
    Returns a list of stabilities for randomly chosen challenges of randomly chosen MV XOR Arbiter PUF.
    :param n: Length of arbiter chains
    :param k: Number of arbiter chains
    :param vote_count: Number of votes for each chain
    :param sigma_noise_ratio: sigma_noise to sigma_model ratio of the arbiter chains
    :param num: number of challenges to compute stability for
    :param reps: number of samples per challenge to base the stability computation on
    :param random: PRNG instance used for all randomness here
    """
    sigma_noise = NoisyLTFArray.sigma_noise_from_random_weights(n, 1, sigma_noise_ratio)
    weights = LTFArray.normal_weights(n, k, random_instance=random)
    instance_mv = SimulationMajorityLTFArray(weights,
                                             LTFArray.transform_atf,
                                             LTFArray.combiner_xor,
                                             sigma_noise,
                                             random_instance_noise=random,
                                             vote_count=vote_count)

    stabilities = tools.approx_stabilities(instance_mv, num, reps, random)
    print('{' + ','.join(map(str, stabilities)) + '}')
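
A hedged usage sketch: the random parameter is a PRNG instance (it is forwarded as random_instance), and the remaining parameter values below are illustrative only.

# Sketch with illustrative parameter values; note that `random` must be a PRNG
# instance (it is forwarded as random_instance), not a bare integer seed.
from numpy.random import RandomState

stability_figure_data(
    n=32, k=2, vote_count=3, sigma_noise_ratio=0.1,
    num=100, reps=10, random=RandomState(0xC0FFEE),
)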
Example #19
    def test_bias_influence_value(self):
        """
        This method tests the influence of the bias value. The biased and unbiased responses should differ.
        """
        n = 8
        k = 4
        mu = 1
        sigma = 0.5

        challenges = array(list(tools.all_inputs(n)))

        weight_array = NoisyLTFArray.normal_weights(n, k, mu=mu, sigma=sigma, random_instance=RandomState(0xBADA556))
        bias_value = 2.5

        biased_ltf_array = NoisyLTFArray(
            weight_array=weight_array,
            transform=NoisyLTFArray.transform_id,
            combiner=NoisyLTFArray.combiner_xor,
            sigma_noise=sigma,
            bias=bias_value,
        )
        ltf_array = NoisyLTFArray(
            weight_array=weight_array,
            transform=NoisyLTFArray.transform_id,
            combiner=NoisyLTFArray.combiner_xor,
            sigma_noise=sigma,
            bias=None,
        )
        # The biased weight_array must have exactly one more column (the bias) than the unbiased weight_array.
        self.assertEqual(shape(ltf_array.weight_array)[1]+1, shape(biased_ltf_array.weight_array)[1])

        bias_array = biased_ltf_array.weight_array[:, -1]
        bias_array_compared = [bias == bias_array[0] for bias in bias_array]
        # the bias values should be equal for this test.
        self.assertTrue(array(list(bias_array_compared)).all())

        biased_responses = biased_ltf_array.eval(challenges)
        responses = ltf_array.eval(challenges)

        # The biased and unbiased responses should not be identical.
        self.assertFalse(array_equal(biased_responses, responses))
Example #20
 def __init__(self,
              log_name,
              n,
              k,
              challenge_count,
              seed_instance,
              seed_instance_noise,
              transformation,
              combiner,
              mu,
              sigma,
              sigma_noise_ratio,
              seed_challenges,
              desired_stability,
              overall_desired_stability,
              minimum_vote_count,
              iterations,
              bias=False):
     """
     :param log_name: string
                      The prefix of the self.progress_logger.
     :param n: int
               The number of stages of the PUF.
     :param k: int
               The number of different LTFArrays of the SimulationMajorityLTFArray.
     :param challenge_count: int
                             The number of challenges which are used to evaluate the PUF.
     :param seed_instance: int
                           The seed which is used to initialize the pseudo-random number generator
                           which is used to generate the stage weights for the arbiter PUF simulation.
     :param seed_instance_noise: int
                                 The random seed which is used to initialize the pseudo-random number
                                 generator which is used to generate the noise for the arbiter PUF simulation.
     :param transformation: A function: array of int with shape(N,k,n), int number of PUFs k -> shape(N,k,n)
                            The function transforms input challenges in order to increase resistance against attacks.
     :param combiner: A function: array of int with shape(N,k,n) -> array of int with shape(N)
                      The function combines the outputs of k PUFs to one-bit results,
                      in order to increase resistance against attacks.
     :param mu: float
                Mean (“centre”) of the stage weight distribution of the PUF instance simulation.
     :param sigma: float
                   Standard deviation of the stage weight distribution of the PUF instance simulation.
     :param sigma_noise_ratio: float
                               The noisiness factor which is used to scale sigma_noise. The value sigma_noise
                               is the standard deviation of the noise distribution of the PUF instance
                               simulation.
     :param seed_challenges: int
                             The seed which is used to initialize the pseudo-random number generator which
                             is used to generate challenges.
     :param desired_stability: float
                               The number which is used to decide whether a PUF is stable or not.
     :param overall_desired_stability: float
                                       The required relative frequency of challenges whose stability is
                                       greater than or equal to desired_stability.
     :param minimum_vote_count: int
                                The minimum number of votes which is used to evaluate the
                                SimulationMajorityLTFArray instance.
     :param iterations: int
                        The number of evaluations of the SimulationMajorityLTFArray instance which are used
                        to check the desired_stability.
     :param bias: boolean or float
                  The value is used to turn on input/output distortion of the PUF instance simulation.
     """
     super().__init__(log_name='%s.0x%x_0_%i_%i_%i_%s_%s' % (
         log_name,
         seed_instance,
         n,
         k,
         challenge_count,
         transformation.__name__,
         combiner.__name__,
     ), )
     self.n = n
     self.k = k
     self.N = challenge_count
     self.seed_instance = seed_instance
     self.seed_instance_noise = seed_instance_noise
     self.seed_challenges = seed_challenges
     self.transformation = transformation
     self.combiner = combiner
     self.mu = mu
     self.sigma = sigma
     self.sigma_noise = NoisyLTFArray.sigma_noise_from_random_weights(
         n, 1, sigma_noise_ratio)
     self.bias = bias
     self.desired_stability = desired_stability
     self.overall_desired_stability = overall_desired_stability
     self.minimum_vote_count = minimum_vote_count
     self.maximum_vote_count = 0  # Upper bound for binary search calculated in run()
     self.vote_count = 0  # That is calculated during run()
     self.result_overall_stab = 0.0
     self.result_vote_count = 0
     self.iterations = iterations
     self.overall_stab = 0.0  # That is the overall_stab for vote_count calculated during run()
Example #21
 def prepare(self):
     self.sigma_noise = NoisyLTFArray.sigma_noise_from_random_weights(
         self.parameters.challenge_count, 1, self.parameters.sigma_noise_ratio)