Example 1
 def test_init_normal_empirical(self):
     """
     Test that initialization by intra-distance yields the desired intra-distance.
     """
     for intra_dist in [.1, .2, .3]:
         nla = NoisyLTFArray.init_normal_empirical(
             32,
             1,
             NoisyLTFArray.transform_id,
             NoisyLTFArray.combiner_xor,
             intra_dist,
             approx_threshold=.01,
             random_instance=RandomState(0xbeef))
         self.assertTrue(
             abs(tools.approx_dist(nla, nla, 10000) - intra_dist) < .02)
     for intra_dist in [.1, .2, .3]:
         nla = NoisyLTFArray.init_normal_empirical(
             64,
             4,
             NoisyLTFArray.transform_id,
             NoisyLTFArray.combiner_xor,
             intra_dist,
             approx_threshold=.1,
             random_instance=RandomState(0xbeef))
         self.assertTrue(
             abs(tools.approx_dist(nla, nla, 10000) - intra_dist) < .15)
Example 2
    def init_normal_empirical(n,
                              k,
                              transform,
                              combiner,
                              intra_dist,
                              random_instance=RandomState(),
                              bias=None,
                              approx_threshold=.1):
        """
        Initializes a NoisyLTFArray with given parameters that can be expected to have the given intra_dist.
        :param n: length of challenges
        :param k: number of LTFs in the array
        :param transform: input transformation for the LTF array
        :param combiner: function mapping the individual output bits to one output bit
        :param intra_dist: desired intra-distance, defined as the probability of seeing different outputs on two
                           evaluations using the same challenge.
        :param random_instance: pseudorandom generator to be used
        :param bias: bias of the LTF array
        :param approx_threshold: tolerated deviation of the measured intra-distance from the desired intra_dist
        :return: NoisyLTFArray
        """
        assert intra_dist > 0

        instance = NoisyLTFArray(
            weight_array=LTFArray.normal_weights(
                n, k, random_instance=random_instance),
            transform=transform,
            combiner=combiner,
            sigma_noise=1,
            random_instance=random_instance,
            bias=bias,
        )

        # double max_sigma_noise until large enough
        while tools.approx_dist(instance, instance, 1000) < intra_dist:
            instance.sigma_noise *= 2
        min_sigma_noise = 0
        max_sigma_noise = 2 * instance.sigma_noise

        # binary search in [0, max_sigma_noise]
        instance.sigma_noise = (max_sigma_noise + min_sigma_noise) / 2
        estimation_distance = tools.approx_dist(instance, instance, 10000,
                                                random_instance)
        while abs(intra_dist - estimation_distance) > approx_threshold:

            # update interval bounds
            if estimation_distance > intra_dist:
                max_sigma_noise = instance.sigma_noise
            else:
                min_sigma_noise = instance.sigma_noise

            # update instance and estimated distance
            instance.sigma_noise = (max_sigma_noise + min_sigma_noise) / 2
            estimation_distance = tools.approx_dist(instance, instance, 10000,
                                                    random_instance)

        return instance
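
A minimal usage sketch for init_normal_empirical; the import paths are an assumption based on the pypuf 0.x layout these examples come from:

from numpy.random import RandomState
from pypuf.simulation.arbiter_based.ltfarray import NoisyLTFArray
from pypuf import tools

# Request a 64-bit, 2-XOR noisy array whose intra-distance is close to 0.2.
puf = NoisyLTFArray.init_normal_empirical(
    n=64,
    k=2,
    transform=NoisyLTFArray.transform_id,
    combiner=NoisyLTFArray.combiner_xor,
    intra_dist=.2,
    random_instance=RandomState(0xbeef),
)

# Evaluating the instance against itself estimates the intra-distance actually achieved.
print(tools.approx_dist(puf, puf, 10000))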
Example 3
 def analyze(self):
     self.progress_logger.debug('Analyzing result')
     accuracy = -1 if not self.model else 1.0 - tools.approx_dist(
         instance1=self.simulation,
         instance2=self.model,
         num=10 ** 4,
         random_instance=RandomState(seed=self.parameters.seed),
     )
     return Result(
         name=self.NAME,
         n=self.parameters.simulation.n,
         first_k=self.parameters.simulation.k,
         num_chains=self.parameters.simulation.chains,
         num_xors=self.parameters.simulation.xors,
         num_interposings=self.parameters.simulation.interposings,
         experiment_id=self.id,
         pid=getpid(),
         measured_time=self.measured_time,
         iterations=-1 if not self.model else self.learner.nn.n_iter_,
         accuracy=accuracy,
         accuracy_relative=accuracy / self.reliability,
         stability=self.stability,
         reliability=self.reliability,
         loss_curve=[-1] if not self.model else [round(loss, 3) for loss in self.learner.nn.loss_curve_],
         accuracy_curve=[-1] if not self.model else [round(accuracy, 3) for accuracy in self.learner.accuracy_curve],
         max_memory=self.max_memory(),
     )
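
The accuracy_relative field above normalizes accuracy by the simulation's reliability: even a perfect model of the noise-free function can only agree with a noisy instance about as often as that instance agrees with its own noise-free responses, so raw accuracy is roughly capped at reliability, and accuracy_relative measures how close the learner got to that ceiling. A small numeric illustration (values made up):

# With reliability 0.95, a raw accuracy of 0.93 is already ~98% of the
# noise-imposed ceiling, i.e. the model is nearly as good as possible.
reliability = 0.95
accuracy = 0.93
print(accuracy / reliability)  # ~0.9789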
Example 4
def train(challenges, responses, t_pairs):
    try:
        with open('weights.txt', 'rb') as f:
            weights = np.load(f)
    except OSError:
        print("[*] ENOWEIGHTS")
        return None

    # create an instance from the same weights for accuracy calculation
    instance = LTFArray(
        weight_array=weights,
        transform=LTFArray.transform_atf,
        combiner=LTFArray.combiner_xor,
    )

    # train model from obtained CRPs
    training_set = tools.ChallengeResponseSet(challenges, responses)
    lr_learner = LogisticRegression(
        t_set=training_set,
        n=48,
        k=4,
        transformation=LTFArray.transform_atf,
        combiner=LTFArray.combiner_xor,
    )

    # learn and test the model
    model = lr_learner.learn()
    accuracy = 1 - tools.approx_dist(instance, model, 10000)

    print('Learned a 48-bit 4-xor XOR Arbiter PUF from {} CRPs with accuracy {}'.format(t_pairs, accuracy))
    
    return model
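
train() above assumes a weights.txt file already exists. A hypothetical companion snippet that produces one with matching shape (48-bit, 4 chains, per the learner parameters above) might look like this; the import path is assumed, as before:

import numpy as np
from pypuf.simulation.arbiter_based.ltfarray import LTFArray

# Draw Gaussian weights for a 48-bit, 4-chain array and store them in the
# np.load-compatible binary format that train() reads back.
weights = LTFArray.normal_weights(n=48, k=4)
with open('weights.txt', 'wb') as f:
    np.save(f, weights)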
Example 5
    def analyze(self):
        """
        Analyzes the learned result.
        """
        assert self.model is not None
        accuracy = 1.0 - tools.approx_dist(
            self.instance,
            self.model,
            min(10000, 2**self.parameters.n),
            random_instance=RandomState(self.parameters.seed_distance),
        )

        return Result(
            experiment_id=self.id,
            pid=getpid(),
            iteration_count=self.learner.iteration_count,
            epoch_count=self.learner.epoch_count,
            gradient_step_count=self.learner.gradient_step_count,
            measured_time=self.measured_time,
            accuracy=accuracy,
            model=self.model.weight_array.flatten() / norm(self.model.weight_array.flatten()),
            transformation_name=self.instance.transform.__name__,
            memory_rss_max=self.max_memory(),
        )
Example 6
def main():
    """
    Run an example how to use pypuf.
    Developers Notice: Changes here need to be mirrored to README!
    """
    
    # create a simulation with random (Gaussian) weights
    # for a 64-bit 2-XOR arbiter PUF
    instance = LTFArray(
        weight_array=LTFArray.normal_weights(n=64, k=2),
        transform=LTFArray.transform_atf,
        combiner=LTFArray.combiner_xor,
    )

    # create the learner
    lr_learner = LogisticRegression(
        t_set=tools.TrainingSet(instance=instance, N=12000),
        n=64,
        k=2,
        transformation=LTFArray.transform_atf,
        combiner=LTFArray.combiner_xor,
    )

    # learn and test the model
    model = lr_learner.learn()
    accuracy = 1 - tools.approx_dist(instance, model, 10000)

    # output the result
    print('Learned a 64bit 2-xor XOR Arbiter PUF from 12000 CRPs with accuracy %f' % accuracy)
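
Throughout these examples, tools.approx_dist(a, b, num) estimates the fraction of challenges on which the two simulations a and b disagree, sampled over num uniformly random challenges, so 1 - approx_dist is the model's empirical accuracy. Passing a seeded RandomState makes the estimate reproducible; a minimal sketch (the seed value is an arbitrary illustration):

from numpy.random import RandomState

# Re-evaluate the model on a fixed challenge sample for a reproducible estimate.
dist = tools.approx_dist(instance, model, 10000, random_instance=RandomState(0x5eed))
print('accuracy: %f' % (1 - dist))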
Example 7
    def analyze(self):
        """
        Analyzes the learned result.
        """
        assert self.model is not None

        accuracy = 1.0 - approx_dist(self.instance, self.model,
                                     min(10000, 2**self.n), self.distance_prng)

        correct_iteration = None
        if self.learner.total_permutation_iterations > 0:
            correct_iteration = self.find_correct_permutation(
                self.learner.initial_model.weight_array)

        return Result(
            experiment_id=self.id,
            pid=getpid(),
            measured_time=self.measured_time,
            initial_model=self.learner.initial_model,
            initial_lr_iterations=self.learner.initial_lr_iterations,
            initial_accuracy=self.learner.initial_accuracy,
            correct_permutation=correct_iteration,
            best_permutation=self.learner.best_permutation,
            best_permutation_iteration=self.learner.best_permutation_iteration,
            total_permutation_iterations=self.learner.total_permutation_iterations,
            total_lr_iterations=self.learner.total_lr_iterations,
            model=self.model,
            accuracy=accuracy)
Example 8
    def analyze(self):
        """
        Analyzes the learned result.
        """
        assert self.model is not None

        self.result_logger.info(
            # seed_instance  seed_model i      n      k      N      trans  comb   iter   time   accuracy  model values
            '0x%x\t'        '0x%x\t'   '%i\t' '%i\t' '%i\t' '%i\t' '%s\t' '%s\t' '%i\t' '%f\t' '%f\t'    '%s',
            self.seed_instance,
            self.seed_model,
            0,  # restart count, kept for compatibility to old log files
            self.n,
            self.k,
            self.N,
            self.transformation.__name__,
            self.combiner.__name__,
            self.learner.iteration_count,
            self.measured_time,
            1.0 - tools.approx_dist(
                self.instance,
                self.model,
                min(10000, 2 ** self.n),
                random_instance=self.distance_prng,
            ),
            ','.join(map(str, self.model.weight_array.flatten() / norm(self.model.weight_array.flatten())))
        )
Example 9
    def learn(self):
        model = LTFArray(
            weight_array=LTFArray.normal_weights(self.n, self.k,
                                                 self.weights_mu,
                                                 self.weights_sigma,
                                                 self.weights_prng),
            transform=self.transformation,
            combiner=self.combiner,
        )
        self.iteration_count = 0
        challenges = []
        responses = []

        # seed the CRP set with the all-ones challenge
        first_challenge = ones(self.n)
        challenges.append(first_challenge)
        responses.append(
            self.__signum(sum(self.orig_LTFArray.weight_array * first_challenge)))

        while self.iteration_count < self.iteration_limit:

            self.__updateModel(model)
            stderr.write('\riter %5i         \n' % self.iteration_count)
            self.iteration_count += 1
            center, radius = self.__chebyshev_center(challenges, responses)
            stderr.write('radius %f\n' % radius)

            model.weight_array = [center]
            distance = tools.approx_dist(self.orig_LTFArray, model,
                                         min(10000, 2**model.n))
            self.min_distance = min(distance, self.min_distance)
            stderr.write('distance %f\n' % distance)
            if distance < 0.01:
                break
            minAccuracy = abs(radius * sqrt(model.n))
            newC = self.__closest_challenge(center, minAccuracy)
            challenges.append(newC)
            responses.append(
                self.__signum(sum(newC * self.orig_LTFArray.weight_array)))

        return model
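
The learner above repeatedly queries a challenge near the Chebyshev center of the current version space. As a self-contained illustration of that idea (an assumed reconstruction using scipy, not the learner's actual __chebyshev_center helper): the single-LTF weight vectors consistent with CRPs (c_i, r_i) form the cone r_i * (c_i . w) >= 0, and intersecting it with the box [-1, 1]^n gives a polytope whose Chebyshev center solves a small linear program:

import numpy as np
from scipy.optimize import linprog

def chebyshev_center(challenges, responses):
    """Center and radius of the largest ball inside the version-space polytope."""
    a = np.asarray(challenges, dtype=float) * np.asarray(responses, dtype=float)[:, None]
    m, n = a.shape
    norms = np.linalg.norm(a, axis=1)
    # Variables: (w_1, ..., w_n, radius); linprog minimizes, so negate the radius.
    c = np.zeros(n + 1)
    c[-1] = -1.0
    # A ball of the given radius around w stays inside half-space a_i . w >= 0
    # iff -a_i . w + ||a_i|| * radius <= 0; the box faces |w_j| <= 1 contribute
    # +-w_j + radius <= 1 (their normals e_j have unit norm).
    eye = np.eye(n)
    A_ub = np.vstack([
        np.hstack([-a, norms[:, None]]),
        np.hstack([eye, np.ones((n, 1))]),
        np.hstack([-eye, np.ones((n, 1))]),
    ])
    b_ub = np.concatenate([np.zeros(m), np.ones(2 * n)])
    bounds = [(None, None)] * n + [(0, None)]
    result = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds)
    return result.x[:n], result.x[n]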
Example 10
 def prepare(self):
     self.stability = 1.0 - tools.approx_dist(
         instance1=self.simulation,
         instance2=self.simulation,
         num=10 ** 4,
         random_instance=RandomState(seed=self.parameters.seed),
     )
     self.stability = max(self.stability, 1 - self.stability)
     self.reliability = (1 + sqrt(2 * self.stability - 1)) / 2    # estimation of non-noisy vs. noisy
     self.progress_logger.debug(f'Gathering training set with {self.parameters.N} examples')
     self.training_set = tools.TrainingSet(
         instance=self.simulation,
         N=self.parameters.N,
         random_instance=RandomState(seed=self.parameters.seed),
     )
     self.progress_logger.debug('Setting up learner')
     self.learner = MultiLayerPerceptronScikitLearn(
         n=self.parameters.simulation.n,
         k=self.parameters.simulation.k,
         training_set=self.training_set,
         validation_frac=self.parameters.validation_frac,
         transformation=LTFArray.transform_atf,
         preprocessing='short',
         layers=self.parameters.layers,
         learning_rate=self.parameters.learning_rate,
         penalty=0.0002,
         beta_1=0.9,
         beta_2=0.999,
         tolerance=self.parameters.tolerance,
         patience=self.parameters.patience,
         iteration_limit=self.parameters.iteration_limit,
         batch_size=self.parameters.batch_size,
         seed_model=self.parameters.seed,
         print_learning=False,
         logger=self.progress_logger.debug,
         goal=0.95 * self.reliability,
     )
     self.learner.prepare()
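
The reliability formula in prepare() follows from a simple noise model: if each evaluation matches the noise-free response with probability r, two independent evaluations agree with probability r^2 + (1-r)^2, and solving stability = r^2 + (1-r)^2 for r gives r = (1 + sqrt(2*stability - 1)) / 2. The max(...) on the preceding line ensures stability >= 1/2, so the square root stays real. A quick numeric round-trip check:

from math import sqrt

# Two independent noisy evaluations agree iff both match or both miss the
# noise-free response, so stability = r**2 + (1 - r)**2.
r = 0.99
stability = r ** 2 + (1 - r) ** 2          # 0.9802
recovered = (1 + sqrt(2 * stability - 1)) / 2
print(recovered)                           # 0.99, the original r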
Example 11
def train():
    instance = LTFArray(
        weight_array=LTFArray.normal_weights(n=48, k=4),
        transform=LTFArray.transform_atf,
        combiner=LTFArray.combiner_xor,
    )

    N = 18000

    # learn and test the model
    lr_learner = LogisticRegression(
        t_set=tools.TrainingSet(instance=instance, N=N),
        n=48,
        k=4,
        transformation=LTFArray.transform_atf,
        combiner=LTFArray.combiner_xor,
    )

    model = lr_learner.learn()
    accuracy = 1 - tools.approx_dist(instance, model, 10000)

    print(
        'Learned a 48bit 4-xor XOR Arbiter PUF from %d CRPs with accuracy %f' %
        (N, accuracy))
Example 12
                k,
                transformation=transformation,
                combiner=combiner,
                weights_prng=model_prng,
                )

    i = 0
    dist = 1

    while i < restarts and 1 - dist < convergence:
        stderr.write('\r%5i/%5i         ' % (i+1, restarts if restarts < float('inf') else 0))
        start = time.time()
        model = learner.learn()
        end = time.time()
        training_times = append(training_times, end - start)
        dist = tools.approx_dist(instance, model, min(10000, 2 ** n))
        accuracy = append(accuracy, 1 - dist)
        iterations = append(iterations, learner.iteration_count)
        # output test result in machine-friendly format
        # seed_ltf seed_model idx_restart n k N transformation combiner iteration_count time accuracy
        stdout.write(' '.join(
            [
                '0x%x' % seed_instance,
                '0x%x' % seed_model,
                '%5d' % i,
                '%3d' % n,
                '%2d' % k,
                '%6d' % N,
                '%s' % transformation_name,
                '%s' % combiner_name,
                '%4d' % learner.iteration_count,
Example 13
def main(args):

    if len(args) != 10:
        stderr.write('LTF Array Simulator and Logistic Regression Learner\n')
        stderr.write('Usage:\n')
        stderr.write(
            'sim_learn.py n k transformation combiner N restarts instances seed_instance seed_model\n'
        )
        stderr.write('               n: number of bits per Arbiter chain\n')
        stderr.write('               k: number of Arbiter chains\n')
        stderr.write(
            '  transformation: used to transform input before it is used in LTFs\n'
        )
        stderr.write('                  currently available:\n')
        stderr.write('                  - id  -- does nothing at all\n')
        stderr.write(
            '                  - atf -- convert according to "natural" Arbiter chain\n'
        )
        stderr.write('                           implementation\n')
        stderr.write(
            '                  - mm  -- designed to achieve maximum PTF expansion length\n'
        )
        stderr.write(
            '                           only implemented for k=2 and even n\n')
        stderr.write(
            '                  - lightweight_secure -- design by Majzoobi et al. 2008\n'
        )
        stderr.write(
            '                                          only implemented for even n\n'
        )
        stderr.write(
            '                  - shift_lightweight_secure -- design like Majzoobi\n'
        )
        stderr.write(
            '                                                et al. 2008, but with the shift\n'
        )
        stderr.write(
            '                                                operation executed first\n'
        )
        stderr.write(
            '                                                only implemented for even n\n'
        )
        stderr.write(
            '                  - soelter_lightweight_secure -- design like Majzoobi\n'
        )
        stderr.write(
            '                                                  et al. 2008, but one bit different\n'
        )
        stderr.write(
            '                                                  only implemented for even n\n'
        )
        stderr.write(
            '                  - 1_n_bent -- one LTF gets "bent" input, the others id\n'
        )
        stderr.write(
            '                  - 1_1_bent -- one bit gets "bent" input, the others id,\n'
        )
        stderr.write(
            '                                this is proven to have maximum PTF\n'
        )
        stderr.write('                                length for the model\n')
        stderr.write(
            '                  - polynomial -- challenges are interpreted as polynomials\n'
        )
        stderr.write(
            '                                  from GF(2^64). From the initial challenge c,\n'
        )
        stderr.write(
            '                                  the i-th Arbiter chain gets the coefficients \n'
        )
        stderr.write(
            '                                  of the polynomial c^(i+1) as challenge.\n'
        )
        stderr.write(
            '                                  For now only challenges with length n=64 are accepted.\n'
        )
        stderr.write(
            '                  - permutation_atf -- for each Arbiter chain first a pseudorandom permutation \n'
        )
        stderr.write(
            '                                       is applied and thereafter the ATF transform.\n'
        )
        stderr.write(
            '                  - random -- Each Arbiter chain gets a random challenge derived from the\n'
        )
        stderr.write(
            '                              original challenge using a PRNG.\n')
        stderr.write(
            '        combiner: used to combine the output bits to a single bit\n'
        )
        stderr.write('                  currently available:\n')
        stderr.write(
            '                  - xor     -- output the parity of all output bits\n'
        )
        stderr.write(
            '                  - ip_mod2 -- output the inner product mod 2 of all output\n'
        )
        stderr.write('                               bits (even n only)\n')
        stderr.write(
            '               N: number of challenge response pairs in the training set\n'
        )
        stderr.write(
            '        restarts: number of repeated initializations of the learner;\n'
        )
        stderr.write(
            '                  use a float x, 0<x<1, to repeat until the given accuracy is reached\n'
        )
        stderr.write(
            '       instances: number of repeated initializations of the instance.\n'
        )
        stderr.write(
            '                  The total number of learning attempts is restarts*instances.\n'
        )
        stderr.write(
            '   seed_instance: random seed used for LTF array instance\n')
        stderr.write(
            '      seed_model: random seed used for the model in first learning attempt\n'
        )
        quit(1)

    n = int(args[1])
    k = int(args[2])
    transformation_name = args[3]
    combiner_name = args[4]
    N = int(args[5])

    if float(args[6]) < 1:
        restarts = float('inf')
        convergence = float(args[6])
    else:
        restarts = int(args[6])
        convergence = 1.1

    instances = int(args[7])

    seed_instance = int(args[8], 16)
    seed_model = int(args[9], 16)

    # reproduce 'random' numbers and avoid interference with other random numbers drawn
    instance_prng = RandomState(seed=seed_instance)
    model_prng = RandomState(seed=seed_model)

    transformation = None
    combiner = None

    try:
        transformation = getattr(LTFArray,
                                 'transform_%s' % transformation_name)
    except AttributeError:
        stderr.write(
            'Transformation %s unknown or currently not implemented\n' %
            transformation_name)
        quit()

    try:
        combiner = getattr(LTFArray, 'combiner_%s' % combiner_name)
    except AttributeError:
        stderr.write('Combiner %s unknown or currently not implemented\n' %
                     combiner_name)
        quit()

    stderr.write(
        'Learning %s-bit %s XOR Arbiter PUF with %s CRPs and %s restarts.\n\n'
        % (n, k, N, restarts))
    stderr.write('Using\n')
    stderr.write('  transformation:       %s\n' % transformation)
    stderr.write('  combiner:             %s\n' % combiner)
    stderr.write('  instance random seed: 0x%x\n' % seed_instance)
    stderr.write('  model random seed:    0x%x\n' % seed_model)
    stderr.write('\n')

    accuracy = array([])
    training_times = array([])
    iterations = array([])

    for j in range(instances):

        stderr.write('----------- Choosing new instance. ---------\n')

        instance = LTFArray(
            weight_array=LTFArray.normal_weights(
                n, k, random_instance=instance_prng),
            transform=transformation,
            combiner=combiner,
        )

        lr_learner = LogisticRegression(
            tools.TrainingSet(instance=instance, N=N),
            n,
            k,
            transformation=transformation,
            combiner=combiner,
            weights_prng=model_prng,
        )

        i = 0
        dist = 1

        while i < restarts and 1 - dist < convergence:
            stderr.write('\r%5i/%5i         ' %
                         (i + 1, restarts if restarts < float('inf') else 0))
            start = time.time()
            model = lr_learner.learn()
            end = time.time()
            training_times = append(training_times, end - start)
            dist = tools.approx_dist(instance, model, min(10000, 2**n))
            accuracy = append(accuracy, 1 - dist)
            iterations = append(iterations, lr_learner.iteration_count)
            # output test result in machine-friendly format
            # seed_ltf seed_model idx_restart n k N transformation combiner iteration_count time accuracy
            stderr.write(' '.join([
                '0x%x' % seed_instance,
                '0x%x' % seed_model,
                '%5d' % i,
                '%3d' % n,
                '%2d' % k,
                '%6d' % N,
                '%s' % transformation_name,
                '%s' % combiner_name,
                '%4d' % lr_learner.iteration_count,
                '%9.3f' % (end - start),
                '%1.5f' % (1 - dist),
            ]) + '\n')
            #stderr.write('training time:                % 5.3fs' % (end - start))
            #stderr.write('min training distance:        % 5.3f' % lr_learner.min_distance)
            #stderr.write('test distance (1000 samples): % 5.3f\n' % dist)
            i += 1

    stderr.write('\r              \r')
    stderr.write('\n\n')
    stderr.write('training times: %s\n' % training_times)
    stderr.write('iterations: %s\n' % iterations)
    stderr.write('test accuracy: %s\n' % accuracy)
    stderr.write('\n\n')
    stderr.write(
        'min/avg/max training time  : % 9.3fs /% 9.3fs /% 9.3fs\n' %
        (amin(training_times), mean(training_times), amax(training_times)))
    stderr.write('min/avg/max iteration count: % 9.3f  /% 9.3f  /% 9.3f \n' %
                 (amin(iterations), mean(iterations), amax(iterations)))
    stderr.write('min/avg/max test accuracy  : % 9.3f  /% 9.3f  /% 9.3f \n' %
                 (amin(accuracy), mean(accuracy), amax(accuracy)))
    stderr.write('\n\n')
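
For reference, a hypothetical invocation matching the argument order in the usage text above (all values illustrative):

    python sim_learn.py 64 2 atf xor 12000 5 3 0xbeef 0xcafe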