Example #1
    def learn(self):
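        """
        Learn a model of self.orig_LTFArray by iteratively adding challenge/response
        pairs as constraints and using the Chebyshev center of the resulting polytope
        as the current model weights.
        :return: pypuf.simulation.arbiter_based.LTFArray
                 The computed model.
        """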
        model = LTFArray(
            weight_array=LTFArray.normal_weights(self.n, self.k,
                                                 self.weights_mu,
                                                 self.weights_sigma,
                                                 self.weights_prng),
            transform=self.transformation,
            combiner=self.combiner,
        )
        self.iteration_count = 0
        challenges = []
        responses = []

        challenges.append(ones(self.n))
        responses.append(
            self.__signum(sum(self.orig_LTFArray.weight_array * challenges)))

        while self.iteration_count < self.iteration_limit:

            self.__updateModel(model)
            stderr.write('\riter %5i         \n' % (self.iteration_count))
            self.iteration_count += 1
            [center, radius] = self.__chebyshev_center(challenges, responses)
            stderr.write("radius ")
            stderr.write("%f\n" % radius)
            stderr.write("distance ")

            model.weight_array = [center]
            distance = tools.approx_dist(self.orig_LTFArray, model,
                                         min(10000, 2**model.n))
            self.min_distance = min(distance, self.min_distance)
            if distance < 0.01:
                break
            minAccuracy = abs(radius * sqrt(model.n))
            stderr.write("%f\n" % distance)
            newC = self.__closest_challenge(center, minAccuracy)
            challenges.append(newC)
            responses.append(
                self.__signum(sum(newC * self.orig_LTFArray.weight_array)))

        return model
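
The loop above delegates the geometric steps to __chebyshev_center and __closest_challenge, which are not shown. As an illustration of the idea (not the implementation from the source): each recorded (challenge, response) pair restricts the weight vector w to the half-space response * <challenge, w> >= 0, and the Chebyshev center is the deepest point inside the intersection of these half-spaces, which a single linear program can find. A minimal sketch, assuming scipy is available and bounding w to the unit box so the program stays bounded:

from numpy import vstack
from numpy.linalg import norm
from scipy.optimize import linprog


def chebyshev_center(challenges, responses):
    # Each pair constrains w to the half-space  response * <challenge, w> >= 0,
    # i.e.  (-response * challenge) . w <= 0.  A ball of radius r around the
    # center additionally needs  a . w + r * ||a|| <= 0 for every constraint row a.
    rows = vstack([list(-r * c) + [norm(c)] for c, r in zip(challenges, responses)])
    n = len(challenges[0])
    cost = [0.0] * n + [-1.0]             # maximize the radius r (the last variable)
    bounds = [(-1, 1)] * n + [(0, None)]  # w in the unit box, radius non-negative
    result = linprog(cost, A_ub=rows, b_ub=[0.0] * len(challenges), bounds=bounds)
    return result.x[:n], result.x[-1]
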
Example #2
    def find_high_accuracy_weight_permutations(self, weights, threshold):
        """
        Finds the permutations of the weight-array that result in the highest model accuracies.
        :param weights: The original weight-array
        :param threshold: Minimum accuracy a permutation must reach to be considered
        :return: The (at most) 5k permutations with the highest accuracy
        """
        high_accuracy_permutations = []
        adopted_instance = LTFArray(
            weight_array=zeros((self.k, self.n)),
            transform=LTFArray.transform_lightweight_secure,
            combiner=LTFArray.combiner_xor)
        for permutation in list(permutations(range(self.k)))[1:]:
            adopted_instance.weight_array = self.adopt_weights(
                weights, permutation)
            accuracy = self.approx_accuracy(adopted_instance)
            self.logger.debug('For permutation %s, we have accuracy %.4f' %
                              (permutation, accuracy))
            if accuracy >= threshold:
                high_accuracy_permutations.append(
                    PermData(permutation, accuracy))

        high_accuracy_permutations.sort(key=lambda x: -x.accuracy)
        return high_accuracy_permutations[:5 * self.k]
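
Ranking weight permutations by accuracy only matters when each of the k chains sees its own transformed challenge (as with transform_lightweight_secure above); with one shared challenge per chain, the XOR of the chains is symmetric and every permutation would score 1.0. Below is a self-contained sketch of the same idea in plain numpy, using an illustrative cyclic-shift transformation as a stand-in for the pypuf transform; all names in it are assumptions, not taken from the source.

from itertools import permutations

from numpy import einsum, prod, roll, sign, stack
from numpy.random import RandomState


def xor_ltf_responses(weights, challenges):
    # Chain i sees the challenge cyclically shifted by i positions, so the
    # order of the weight rows matters (stand-in for a per-chain transform).
    k, _ = weights.shape
    sub_challenges = stack([roll(challenges, i, axis=1) for i in range(k)], axis=1)
    return sign(prod(einsum('nki,ki->nk', sub_challenges, weights), axis=1))


def high_accuracy_weight_permutations(weights, threshold, num_challenges=2000, seed=1):
    k, n = weights.shape
    challenges = 2 * RandomState(seed).randint(0, 2, size=(num_challenges, n)) - 1  # {-1, +1}^n
    reference = xor_ltf_responses(weights, challenges)
    scored = []
    for perm in list(permutations(range(k)))[1:]:  # skip the identity permutation
        accuracy = (xor_ltf_responses(weights[list(perm)], challenges) == reference).mean()
        if accuracy >= threshold:
            scored.append((perm, accuracy))
    scored.sort(key=lambda item: -item[1])
    return scored[:5 * k]
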
Example #3
    def learn(self, init_weight_array=None, eta_minus=0.5, eta_plus=1.2, refresh_updater=True):
        """
        Compute a model according to the given LTF Array parameters and training set.
        Note that this function can take a long time to return.
        :return: pypuf.simulation.arbiter_based.LTFArray
                 The computed model.
        """
        self.logger.debug('LR learner started')
        test_set_accuracies = []

        # log format
        def log_state(step_size):
            """
            This method is used to log a snapshot of learning variables while running.
            """
            if self.logger is None:
                return
            self.logger.debug(
                '%i\t%s\t%f\t%f\t%f\t%s' % (
                    self.iteration_count,
                    f'{self.test_set_dist:.4f}' if self.test_set else '<no test set given>',
                    self.training_set_dist_sign,
                    self.training_set_dist,
                    step_size,
                    ','.join(map(str, model.weight_array.flatten())) if self.n <= 1024 else '<weight array too large>',
                )
            )

        # let numpy raise exceptions
        seterr(all='raise')

        # Prepare challenges
        self.logger.debug(f'Challenge bit type {self.training_set.challenges.dtype}')
        self.logger.debug(f'Transforming {len(self.training_set.challenges)} given {self.n}-bit '
                          f'challenges using {self.transformation.__name__} for k={self.k} ...')
        transformed_challenges = self.transformation(self.training_set.challenges, self.k)
        if self.bias:
            self.logger.debug(f'Efba\'ing {len(self.training_set.challenges)} given {self.n}-bit challenges')
            self.efba_sub_challenges = LTFArray.efba_bit(transformed_challenges)
        else:
            self.logger.debug(f'Not efba\'ing {len(self.training_set.challenges)} challenges, assuming unbiased target')
            self.efba_sub_challenges = transformed_challenges

        # we start with a random model
        self.logger.debug(f'Initializing random unbiased model')
        model = LTFArray(
            weight_array=LTFArray.normal_weights(self.n, self.k, self.weights_mu, self.weights_sigma,
                                                 self.weights_prng),
            transform=self.transformation,
            combiner=self.combiner,
            bias=0.0,
        )

        if init_weight_array is not None:
            model.weight_array = init_weight_array

        if refresh_updater:
            self.updater = self.RPropModelUpdate(model, bias=self.bias, eta_minus=eta_minus, eta_plus=eta_plus)
        converged = False
        self.iteration_count = 0
        log_state(0)
        number_of_batches = ceil(self.training_set.N / (self.minibatch_size or self.training_set.N))
        self.logger.debug(f'using {self.training_set.N} examples with batches of size '
                          f'{self.minibatch_size}, i.e. {number_of_batches} batches')
        efba_challenge_batches = []
        response_batches = []
        if not self.shuffle:
            efba_challenge_batches = array_split(self.efba_sub_challenges, number_of_batches)
            response_batches = array_split(self.training_set.responses, number_of_batches)

        self.logger.debug(f'Starting learning loop!')
        self.logger.debug(f'stopping when step size smaller than {10**-self.convergence_decimals} or '
                          f'{self.iteration_limit} epochs')
        while not converged and self.iteration_count < self.iteration_limit:
            self.iteration_count += 1
            self.epoch_count += 1

            if self.shuffle:
                if self.epoch_count > 1:
                    RandomState(seed=self.epoch_count).shuffle(self.efba_sub_challenges)
                    RandomState(seed=self.epoch_count).shuffle(self.training_set.responses)
                efba_challenge_batches = array_split(self.efba_sub_challenges, number_of_batches)
                response_batches = array_split(self.training_set.responses, number_of_batches)

            # compute gradient & update model
            for batch in range(number_of_batches):
                gradient = self.gradient(model, efba_challenge_batches[batch], response_batches[batch])
                if self.bias:
                    model.weight_array += self.updater.update(gradient)
                else:
                    model.weight_array[:, :-1] += self.updater.update(gradient)
                self.gradient_step_count += 1

                # check convergence
                current_step_size = norm(self.updater.step)
                if self.test_set and self.test_set.N:
                    self.test_set_dist = approx_dist_nonrandom(model, self.test_set)
                    test_set_accuracies.append(1 - self.test_set_dist)
                converged = (
                    current_step_size < 10**-self.convergence_decimals
                    or (self.target_test_accuracy and 1 - self.test_set_dist > self.target_test_accuracy)
                    or (
                        self.test_accuracy_improvement
                        and self.test_accuracy_patience
                        and len(test_set_accuracies) >= self.test_accuracy_patience
                        and (
                            abs(
                                min(test_set_accuracies[-self.test_accuracy_patience:])
                                - max(test_set_accuracies[-self.test_accuracy_patience:])
                            ) < self.test_accuracy_improvement
                        )
                    )
                ) and (
                    self.iteration_count > self.min_iterations
                )

                # log
                log_state(current_step_size)

                if converged:
                    break

        self.efba_sub_challenges = None  # del ref to training set memory to allow GC if the t-set is also dereferenced
        self.converged = converged
        return model
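
The weight update above is delegated to self.updater, an RPropModelUpdate instance that is not shown in this snippet. The following is a minimal sketch of a simplified resilient backpropagation (RProp) rule of the kind such an updater could implement; it is not the source's RPropModelUpdate, and the sign of the returned step depends on the gradient convention the learner uses.

from numpy import full, maximum, minimum, sign, zeros


class SimpleRProp:
    """Each weight keeps its own step size, grown by eta_plus while its gradient
    sign is stable and shrunk by eta_minus when the sign flips."""

    def __init__(self, shape, eta_minus=0.5, eta_plus=1.2,
                 step_init=0.1, step_min=1e-6, step_max=50.0):
        self.eta_minus, self.eta_plus = eta_minus, eta_plus
        self.step_min, self.step_max = step_min, step_max
        self.step = full(shape, step_init)
        self.last_gradient = zeros(shape)

    def update(self, gradient):
        sign_change = sign(gradient) * sign(self.last_gradient)
        grow, shrink = sign_change > 0, sign_change < 0
        self.step[grow] = minimum(self.step[grow] * self.eta_plus, self.step_max)
        self.step[shrink] = maximum(self.step[shrink] * self.eta_minus, self.step_min)
        self.last_gradient = gradient
        # move each weight against the sign of its gradient by its individual step size
        return -sign(gradient) * self.step
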
Example #4
    def learn(self,
              init_weight_array=None,
              eta_minus=0.5,
              eta_plus=1.2,
              refresh_updater=True):
        """
        Compute a model according to the given LTF Array parameters and training set.
        Note that this function can take a long time to return.
        :return: pypuf.simulation.arbiter_based.LTFArray
                 The computed model.
        """

        # log format
        def log_state():
            """
            This method is used to log a snapshot of learning variables while running.
            """
            if self.logger is None:
                return
            self.logger.debug('%i\t%f\t%f\t%s' % (
                self.iteration_count,
                distance,
                norm(self.updater.step),
                ','.join(map(str, model.weight_array.flatten())),
            ))

        # let numpy raise exceptions
        seterr(all='raise')

        # Prepare challenges
        self.efba_sub_challenges = LTFArray.efba_bit(
            self.transformation(self.training_set.challenges, self.k))

        # we start with a random model
        model = LTFArray(
            weight_array=LTFArray.normal_weights(self.n, self.k,
                                                 self.weights_mu,
                                                 self.weights_sigma,
                                                 self.weights_prng),
            transform=self.transformation,
            combiner=self.combiner,
            bias=0.0,
        )

        if init_weight_array is not None:
            model.weight_array = init_weight_array

        if refresh_updater:
            self.updater = self.RPropModelUpdate(model,
                                                 eta_minus=eta_minus,
                                                 eta_plus=eta_plus)
        converged = False
        distance = 1
        self.iteration_count = 0
        log_state()
        number_of_batches = (self.training_set.N + 1) // self.minibatch_size
        efba_challenge_batches = []
        response_batches = []
        if not self.shuffle:
            efba_challenge_batches = array_split(self.efba_sub_challenges,
                                                 number_of_batches)
            response_batches = array_split(self.training_set.responses,
                                           number_of_batches)
        while not converged and self.iteration_count < self.iteration_limit:
            self.iteration_count += 1
            self.epoch_count += 1

            if self.shuffle:
                if self.epoch_count > 1:
                    RandomState(seed=self.epoch_count).shuffle(
                        self.efba_sub_challenges)
                    RandomState(seed=self.epoch_count).shuffle(
                        self.training_set.responses)
                efba_challenge_batches = array_split(self.efba_sub_challenges,
                                                     number_of_batches)
                response_batches = array_split(self.training_set.responses,
                                               number_of_batches)

            # compute gradient & update model
            for batch in range(number_of_batches):
                gradient = self.gradient(model, efba_challenge_batches[batch],
                                         response_batches[batch])
                model.weight_array += self.updater.update(gradient)
                self.gradient_step_count += 1

                # check convergence
                converged = norm(
                    self.updater.step) < 10**-self.convergence_decimals

                # log
                log_state()

                if converged:
                    break

        self.converged = converged

        return model
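
Neither learner shows the gradient computation done by self.gradient. One standard way to obtain such a gradient for the XOR LTF model is to treat the response probability as a logistic function of the product of the k linear terms and differentiate the negative log-likelihood; the sketch below follows that formulation, and its names, signs, and scaling are assumptions rather than the source's exact computation.

from numpy import delete, einsum, empty_like, exp, prod


def sigmoid(x):
    return 1.0 / (1.0 + exp(-x))


def lr_gradient(weights, sub_challenges, responses):
    # weights:        (k, n)     current model weights
    # sub_challenges: (N, k, n)  transformed challenge seen by each chain
    # responses:      (N,)       observed responses in {-1, +1}
    linear = einsum('nki,ki->nk', sub_challenges, weights)    # per-chain terms <w_l, c_l>
    f = prod(linear, axis=1)                                  # combined model output
    outer = responses * (sigmoid(responses * f) - 1.0)        # d(-log sigmoid(r*f)) / df
    gradient = empty_like(weights, dtype=float)
    for l in range(weights.shape[0]):
        others = prod(delete(linear, l, axis=1), axis=1)      # product of the other chains
        gradient[l] = (outer * others) @ sub_challenges[:, l, :]
    return gradient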