Example No. 1
 def prepare(self):
     """
     Initializes the instance, the training set, and the learner so that the logistic
     regression can then be run with the given parameters.
     """
     self.instance = LTFArray(
         weight_array=LTFArray.normal_weights(
             self.parameters.n,
             self.parameters.k,
             random_instance=RandomState(
                 seed=self.parameters.seed_instance)),
         transform=self.parameters.transformation,
         combiner=self.parameters.combiner,
     )
     self.learner = LogisticRegression(
         tools.TrainingSet(instance=self.instance,
                           N=self.parameters.N,
                           random_instance=RandomState(
                               self.parameters.seed_challenge)),
         self.parameters.n,
         self.parameters.k,
         transformation=self.instance.transform,
         combiner=self.instance.combiner,
         weights_prng=RandomState(seed=self.parameters.seed_model),
         logger=self.progress_logger,
         minibatch_size=self.parameters.mini_batch_size,
         convergance_decimals=self.parameters.convergence_decimals or 2,
         shuffle=self.parameters.shuffle,
     )
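
The prepare() method above only builds the simulation and the learner; a hypothetical run() counterpart (a sketch mirroring Example No. 4 below, with the attribute names assumed) would then trigger the training:

 def run(self):
     # Hypothetical counterpart to prepare(), not part of the original snippet:
     # run the learner configured above and keep the resulting model.
     self.model = self.learner.learn()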
Example No. 2
from sklearn.metrics import accuracy_score

from pypuf import tools
from pypuf.learner.regression.logistic_regression import LogisticRegression
from pypuf.simulation.arbiter_based.ltfarray import LTFArray


def pipeline(N):
    instance = LTFArray(
        weight_array=LTFArray.normal_weights(n=64, k=2),
        transform=LTFArray.transform_atf,
        combiner=LTFArray.combiner_xor
    )

    train_set = tools.TrainingSet(instance=instance, N=N)

    train_size = int(len(train_set.challenges) * 0.95)

    val_set = train_set.subset(slice(train_size, None))
    train_set = train_set.subset(slice(None, train_size))
    
    lr_learner = LogisticRegression(
        t_set=train_set,
        n=64,
        k=2,
        transformation=LTFArray.transform_atf,
        combiner=LTFArray.combiner_xor,
    )

    model = lr_learner.learn()
    
    val_set_predicted_responses = model.eval(val_set.challenges)

    accuracy = accuracy_score(val_set_predicted_responses, val_set.responses)
    
    return accuracy
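
A minimal driver for pipeline() might look as follows; the CRP counts are purely illustrative and not taken from the original code:

if __name__ == '__main__':
    # Sweep the training-set size N and report the validation accuracy
    # computed by pipeline() above (illustrative values of N only).
    for N in (2000, 6000, 12000):
        print('N=%5d  validation accuracy=%.4f' % (N, pipeline(N)))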
Example No. 3
from pypuf import tools
from pypuf.learner.regression.logistic_regression import LogisticRegression
from pypuf.simulation.arbiter_based.ltfarray import LTFArray


def main():
    """
    Run an example of how to use pypuf.
    Developer notice: changes here need to be mirrored to the README!
    """
    
    # create a simulation with random (Gaussian) weights
    # for a 64-bit 2-XOR Arbiter PUF
    instance = LTFArray(
        weight_array=LTFArray.normal_weights(n=64, k=2),
        transform=LTFArray.transform_atf,
        combiner=LTFArray.combiner_xor,
    )

    # create the learner
    lr_learner = LogisticRegression(
        t_set=tools.TrainingSet(instance=instance, N=12000),
        n=64,
        k=2,
        transformation=LTFArray.transform_atf,
        combiner=LTFArray.combiner_xor,
    )

    # learn and test the model
    model = lr_learner.learn()
    accuracy = 1 - tools.approx_dist(instance, model, 10000)

    # output the result
    print('Learned a 64bit 2-xor XOR Arbiter PUF from 12000 CRPs with accuracy %f' % accuracy)
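
For comparison, roughly the same accuracy can be estimated by hand at the end of main(), reusing only names that already appear in the example (a sketch, not part of the original code):

    # Rough manual equivalent of 1 - tools.approx_dist(instance, model, 10000):
    # draw a fresh set of CRPs from the simulation and count how often the
    # learned model reproduces the true response.
    test_set = tools.TrainingSet(instance=instance, N=10000)
    manual_accuracy = (model.eval(test_set.challenges) == test_set.responses).mean()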
Example No. 4
 def run(self):
     """
     Initializes the instance, the training set, and the learner, then runs the logistic
     regression with the given parameters.
     """
     # TODO input transformation is computed twice. Add a shortcut to recycle results from the first computation
     self.instance = LTFArray(
         weight_array=LTFArray.normal_weights(self.n, self.k, random_instance=self.instance_prng),
         transform=self.transformation,
         combiner=self.combiner,
     )
     self.learner = LogisticRegression(
         tools.TrainingSet(instance=self.instance, N=self.N, random_instance=self.challenge_prng),
         self.n,
         self.k,
         transformation=self.transformation,
         combiner=self.combiner,
         weights_prng=self.model_prng,
         logger=self.progress_logger,
     )
     self.model = self.learner.learn()
Example No. 5
 def prepare(self):
     self.stability = 1.0 - tools.approx_dist(
         instance1=self.simulation,
         instance2=self.simulation,
         num=10 ** 4,
         random_instance=RandomState(seed=self.parameters.seed),
     )
     self.stability = max(self.stability, 1 - self.stability)
     # Estimate the reliability w.r.t. the noise-free response from the stability s
     # (agreement of two noisy evaluations): s = r**2 + (1 - r)**2  =>  r = (1 + sqrt(2*s - 1)) / 2
     self.reliability = (1 + sqrt(2 * self.stability - 1)) / 2
     self.progress_logger.debug(f'Gathering training set with {self.parameters.N} examples')
     self.training_set = tools.TrainingSet(
         instance=self.simulation,
         N=self.parameters.N,
         random_instance=RandomState(seed=self.parameters.seed),
     )
     self.progress_logger.debug('Setting up learner')
     self.learner = MultiLayerPerceptronScikitLearn(
         n=self.parameters.simulation.n,
         k=self.parameters.simulation.k,
         training_set=self.training_set,
         validation_frac=self.parameters.validation_frac,
         transformation=LTFArray.transform_atf,
         preprocessing='short',
         layers=self.parameters.layers,
         learning_rate=self.parameters.learning_rate,
         penalty=0.0002,
         beta_1=0.9,
         beta_2=0.999,
         tolerance=self.parameters.tolerance,
         patience=self.parameters.patience,
         iteration_limit=self.parameters.iteration_limit,
         batch_size=self.parameters.batch_size,
         seed_model=self.parameters.seed,
         print_learning=False,
         logger=self.progress_logger.debug,
         goal=0.95 * self.reliability,
     )
     self.learner.prepare()
Example No. 6
def train():
    instance = LTFArray(
        weight_array=LTFArray.normal_weights(n=48, k=4),
        transform=LTFArray.transform_atf,
        combiner=LTFArray.combiner_xor,
    )

    N = 18000

    # learn and test the model
    lr_learner = LogisticRegression(
        t_set=tools.TrainingSet(instance=instance, N=N),
        n=48,
        k=4,
        transformation=LTFArray.transform_atf,
        combiner=LTFArray.combiner_xor,
    )

    model = lr_learner.learn()
    accuracy = 1 - tools.approx_dist(instance, model, 10000)

    print(
        'Learned a 48bit 4-xor XOR Arbiter PUF from %d CRPs with accuracy %f' %
        (N, accuracy))
Example No. 7
import os
import sys

sys.path.insert(0, 'pypuf/')

os.environ["OMP_NUM_THREADS"] = '1'
os.environ["NUMEXPR_NUM_THREADS"] = '1'
os.environ["MKL_NUM_THREADS"] = '1'

from pypuf import tools
from pypuf.simulation.arbiter_based.ltfarray import LTFArray

instance = LTFArray(
    weight_array=LTFArray.normal_weights(n=64, k=2),
    transform=LTFArray.transform_atf,
    combiner=LTFArray.combiner_xor
)

train_set = tools.TrainingSet(instance=instance, N=15000)
val_set = train_set.subset(slice(10000, None))
train_set = train_set.subset(slice(None, 10000))

challenges, responses = train_set.challenges, train_set.responses

print(challenges.shape, responses.shape)

from pypuf.learner.regression.logistic_regression import LogisticRegression

lr_learner = LogisticRegression(
    t_set=train_set,
    n=64,
    k=2,
    transformation=LTFArray.transform_atf,
    combiner=LTFArray.combiner_xor,
)
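
The scraped snippet is cut off after the constructor call; a plausible continuation, mirroring Example No. 2 above, would be:

# Plausible continuation (not part of the original snippet): train the model and
# check it against the held-out validation CRPs, as in Example No. 2.
model = lr_learner.learn()
accuracy = (model.eval(val_set.challenges) == val_set.responses).mean()
print('validation accuracy: %f' % accuracy)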
Example No. 8
iterations = array([])

for j in range(instances):

    stderr.write('----------- Choosing new instance. ---------\n')

    instance = LTFArray(
        weight_array=LTFArray.normal_weights(n, k, random_instance=instance_prng),
        transform=transformation,
        combiner=combiner,
    )
    
    if algorithm == 2:
        learner = PolytopeAlgorithm(
                instance,
                tools.TrainingSet(instance=instance, N=N),
                n,
                k,
                transformation=transformation,
                combiner=combiner,
                weights_prng=model_prng,
                )
    else:
        learner = LogisticRegression(
                tools.TrainingSet(instance=instance, N=N),
                n,
                k,
                transformation=transformation,
                combiner=combiner,
                weights_prng=model_prng,
                )
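    # The scraped snippet ends here; presumably the loop body goes on to call
    # learner.learn() and collect statistics, much like Example No. 9 below.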
Example No. 9
import time
from sys import stderr

from numpy import amax, amin, append, array, mean
from numpy.random import RandomState

from pypuf import tools
from pypuf.learner.regression.logistic_regression import LogisticRegression
from pypuf.simulation.arbiter_based.ltfarray import LTFArray


def main(args):

    if len(args) != 10:
        stderr.write('LTF Array Simulator and Logistic Regression Learner\n')
        stderr.write('Usage:\n')
        stderr.write(
            'sim_learn.py n k transformation combiner N restarts instances seed_instance seed_model\n'
        )
        stderr.write('               n: number of bits per Arbiter chain\n')
        stderr.write('               k: number of Arbiter chains\n')
        stderr.write(
            '  transformation: used to transform input before it is used in LTFs\n'
        )
        stderr.write('                  currently available:\n')
        stderr.write('                  - id  -- does nothing at all\n')
        stderr.write(
            '                  - atf -- convert according to "natural" Arbiter chain\n'
        )
        stderr.write('                           implementation\n')
        stderr.write(
            '                  - mm  -- designed to achieve maximum PTF expansion length\n'
        )
        stderr.write(
            '                           only implemented for k=2 and even n\n')
        stderr.write(
            '                  - lightweight_secure -- design by Majzoobi et al. 2008\n'
        )
        stderr.write(
            '                                          only implemented for even n\n'
        )
        stderr.write(
            '                  - shift_lightweight_secure -- design like Majzoobi\n'
        )
        stderr.write(
            '                                                et al. 2008, but with the shift\n'
        )
        stderr.write(
            '                                                operation executed first\n'
        )
        stderr.write(
            '                                                only implemented for even n\n'
        )
        stderr.write(
            '                  - soelter_lightweight_secure -- design like Majzoobi\n'
        )
        stderr.write(
            '                                                  et al. 2008, but one bit different\n'
        )
        stderr.write(
            '                                                  only implemented for even n\n'
        )
        stderr.write(
            '                  - 1_n_bent -- one LTF gets "bent" input, the others id\n'
        )
        stderr.write(
            '                  - 1_1_bent -- one bit gets "bent" input, the others id,\n'
        )
        stderr.write(
            '                                this is proven to have maximum PTF\n'
        )
        stderr.write('                                length for the model\n')
        stderr.write(
            '                  - polynomial -- challenges are interpreted as polynomials\n'
        )
        stderr.write(
            '                                  from GF(2^64). From the initial challenge c,\n'
        )
        stderr.write(
            '                                  the i-th Arbiter chain gets the coefficients \n'
        )
        stderr.write(
            '                                  of the polynomial c^(i+1) as challenge.\n'
        )
        stderr.write(
            '                                  For now only challenges with length n=64 are accepted.\n'
        )
        stderr.write(
            '                  - permutation_atf -- for each Arbiter chain first a pseudorandom permutation \n'
        )
        stderr.write(
            '                                       is applied and thereafter the ATF transform.\n'
        )
        stderr.write(
            '                  - random -- Each Arbiter chain gets a random challenge derived from the\n'
        )
        stderr.write(
            '                              original challenge using a PRNG.\n')
        stderr.write(
            '        combiner: used to combine the output bits to a single bit\n'
        )
        stderr.write('                  currently available:\n')
        stderr.write(
            '                  - xor     -- output the parity of all output bits\n'
        )
        stderr.write(
            '                  - ip_mod2 -- output the inner product mod 2 of all output\n'
        )
        stderr.write('                               bits (even n only)\n')
        stderr.write(
            '               N: number of challenge response pairs in the training set\n'
        )
        stderr.write(
            '        restarts: number of repeated initializations of the learner\n'
        )
        stderr.write(
            '                  use a float x, 0 < x < 1, to repeat until the given accuracy is reached\n'
        )
        stderr.write(
            '       instances: number of repeated initializations of the instance\n'
        )
        stderr.write(
            '                  The total number of learning attempts is restarts*instances.\n'
        )
        stderr.write(
            '   seed_instance: random seed used for LTF array instance\n')
        stderr.write(
            '      seed_model: random seed used for the model in first learning attempt\n'
        )
        quit(1)

    n = int(args[1])
    k = int(args[2])
    transformation_name = args[3]
    combiner_name = args[4]
    N = int(args[5])

    if float(args[6]) < 1:
        restarts = float('inf')
        convergence = float(args[6])
    else:
        restarts = int(args[6])
        convergence = 1.1

    instances = int(args[7])

    seed_instance = int(args[8], 16)
    seed_model = int(args[9], 16)

    # reproduce 'random' numbers and avoid interference with other random numbers drawn
    instance_prng = RandomState(seed=seed_instance)
    model_prng = RandomState(seed=seed_model)

    transformation = None
    combiner = None

    try:
        transformation = getattr(LTFArray,
                                 'transform_%s' % transformation_name)
    except AttributeError:
        stderr.write(
            'Transformation %s unknown or currently not implemented\n' %
            transformation_name)
        quit()

    try:
        combiner = getattr(LTFArray, 'combiner_%s' % combiner_name)
    except AttributeError:
        stderr.write('Combiner %s unknown or currently not implemented\n' %
                     combiner_name)
        quit()

    stderr.write(
        'Learning %s-bit %s XOR Arbiter PUF with %s CRPs and %s restarts.\n\n'
        % (n, k, N, restarts))
    stderr.write('Using\n')
    stderr.write('  transformation:       %s\n' % transformation)
    stderr.write('  combiner:             %s\n' % combiner)
    stderr.write('  instance random seed: 0x%x\n' % seed_instance)
    stderr.write('  model random seed:    0x%x\n' % seed_model)
    stderr.write('\n')

    accuracy = array([])
    training_times = array([])
    iterations = array([])

    for j in range(instances):

        stderr.write('----------- Choosing new instance. ---------\n')

        instance = LTFArray(
            weight_array=LTFArray.normal_weights(
                n, k, random_instance=instance_prng),
            transform=transformation,
            combiner=combiner,
        )

        lr_learner = LogisticRegression(
            tools.TrainingSet(instance=instance, N=N),
            n,
            k,
            transformation=transformation,
            combiner=combiner,
            weights_prng=model_prng,
        )

        i = 0
        dist = 1
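        # Retrain until the restart budget is exhausted or, when a fractional
        # 'restarts' argument was given, until the accuracy (1 - dist) reaches
        # the requested convergence threshold.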

        while i < restarts and 1 - dist < convergence:
            stderr.write('\r%5i/%5i         ' %
                         (i + 1, restarts if restarts < float('inf') else 0))
            start = time.time()
            model = lr_learner.learn()
            end = time.time()
            training_times = append(training_times, end - start)
            dist = tools.approx_dist(instance, model, min(10000, 2**n))
            accuracy = append(accuracy, 1 - dist)
            iterations = append(iterations, lr_learner.iteration_count)
            # output test result in machine-friendly format
            # seed_ltf seed_model idx_restart n k N transformation combiner iteration_count time accuracy
            stderr.write(' '.join([
                '0x%x' % seed_instance,
                '0x%x' % seed_model,
                '%5d' % i,
                '%3d' % n,
                '%2d' % k,
                '%6d' % N,
                '%s' % transformation_name,
                '%s' % combiner_name,
                '%4d' % lr_learner.iteration_count,
                '%9.3f' % (end - start),
                '%1.5f' % (1 - dist),
            ]) + '\n')
            #stderr.write('training time:                % 5.3fs' % (end - start))
            #stderr.write('min training distance:        % 5.3f' % lr_learner.min_distance)
            #stderr.write('test distance (1000 samples): % 5.3f\n' % dist)
            i += 1

    stderr.write('\r              \r')
    stderr.write('\n\n')
    stderr.write('training times: %s\n' % training_times)
    stderr.write('iterations: %s\n' % iterations)
    stderr.write('test accuracy: %s\n' % accuracy)
    stderr.write('\n\n')
    stderr.write(
        'min/avg/max training time  : % 9.3fs /% 9.3fs /% 9.3fs\n' %
        (amin(training_times), mean(training_times), amax(training_times)))
    stderr.write('min/avg/max iteration count: % 9.3f  /% 9.3f  /% 9.3f \n' %
                 (amin(iterations), mean(iterations), amax(iterations)))
    stderr.write('min/avg/max test accuracy  : % 9.3f  /% 9.3f  /% 9.3f \n' %
                 (amin(accuracy), mean(accuracy), amax(accuracy)))
    stderr.write('\n\n')
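
A hypothetical invocation of the script, with the arguments in the order expected by the parser above (all values are illustrative; the two seeds are read as hexadecimal):

python sim_learn.py 64 2 atf xor 12000 5 3 0x1234 0xabcd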