Example #1
 def __init__(self, training_set, degree, debug=False):
     self.training_set = training_set
     # challenge length, taken from the first challenge in the training set
     self.n = len(training_set.challenges[0])
     # number of monomials of degree at most `degree`: sum of C(n, k) for k = 0..degree
     self.monomial_count = 0
     for k in range(degree + 1):
         self.monomial_count += ncr(self.n, k)
     self.degree = degree
     self.fourier_coefficients = []
     self.debug = debug
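Every snippet on this page calls an `ncr` helper that is not shown. A minimal sketch of such a helper, assuming the intent is the binomial coefficient "n choose r"; since the later examples pass numpy arrays as the second argument, a vectorized implementation such as scipy.special.comb is assumed here:

from scipy.special import comb


def ncr(n, r):
    # Hypothetical helper, not shown in the examples on this page: the binomial
    # coefficient C(n, r). scipy.special.comb accepts array arguments (used by
    # the later examples) and returns 0 for out-of-range inputs.
    return comb(n, r, exact=False)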
Example #2
 def __init__(self, training_set, degree, debug=False):
     """
     :param training_set: pypuf.tools.TrainingSet
                          The training set generated by tools.TrainingSet
     :param degree: int
                    The degree up to which the Fourier coefficients are approximated
     :param debug: boolean
                   If true, a progress message with ETA will be periodically printed to stdout
     """
     self.training_set = training_set
     self.n = len(training_set.challenges[0])
     self.monomial_count = 0
     for k in range(degree + 1):
         self.monomial_count += ncr(self.n, k)
     self.degree = degree
     self.fourier_coefficients = []
     self.debug = debug
Example #3
 def get_training_set_size(n, degree, epsilon, delta):
     """
     This function calculates the training set size needed to satisfy the theoretical requirements of the
     Low Degree Algorithm, so that the epsilon and delta guarantees hold.
     :param n: int
               Input length
     :param degree: int
                    The degree up to which the Fourier coefficients are approximated
     :param epsilon: float
                     The maximum error rate of the model
     :param delta: float
                   The maximum failure rate of the algorithm, where epsilon is not satisfied
     :return: int
              The number of challenge-response pairs required so that the epsilon and delta guarantees hold
     """
     # number of monomials of degree at most `degree`: sum of C(n, k) for k = 0..degree
     monomial_count = 0
     for k in range(degree + 1):
         monomial_count += ncr(n, k)
     # sample size bound: 4 * M * ln(2 * M / delta) / epsilon, with M = monomial_count
     return int(4 * monomial_count * np.log(2 * monomial_count / delta) /
                epsilon)
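A rough usage sketch, with parameter values chosen purely for illustration, assuming numpy is imported as np and get_training_set_size is defined as above:

# Illustrative call only: 32-bit challenges, Fourier coefficients approximated
# up to degree 2, model error at most epsilon = 0.05, failure probability at
# most delta = 0.05. The result is the number of challenge-response pairs to draw.
required_crps = get_training_set_size(n=32, degree=2, epsilon=0.05, delta=0.05)
print(required_crps)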
Example #4
def get_h_j_kk(j):
    if j <= N:
        kk = np.arange(0, (R * M - 1) * N + j)

        # summand of the inner sum (sigma over l)
        def integrator_inner_loop(k):
            ll = np.arange(0, np.floor(k / R * M) + 1)
            return np.sum(
                np.power(-1, ll) * ncr(N, ll) *
                ncr(N - j + k - R*M*ll, k - R*M*ll)
            )

        # sum over the sigma arguments
        h_j_kk = np.fromiter(map(integrator_inner_loop, kk), np.double)

    elif j >= N + 1:
        kk = np.arange(0, 2 * N + 1 - j + 1)

        h_j_kk = np.power(-1, kk) * ncr(2 * N + 1 - j, kk)

    return h_j_kk
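get_h_j_kk reads N, R, M and ncr from the enclosing module, so none of them are defined in the snippet itself. A minimal driving sketch, assuming the function above lives in the same module, that ncr is a vectorized binomial coefficient, and that the constants are small integers chosen only for illustration:

import numpy as np
from scipy.special import comb as ncr  # assumed vectorized binomial coefficient

# Hypothetical module-level constants; the real values come from the
# surrounding project and are not part of this example.
N, R, M = 3, 2, 2

# Evaluate h_j for every j the two branches cover (1 <= j <= 2N + 1).
# With scipy.special.comb, terms whose second argument is negative evaluate
# to 0, so out-of-range indices in the inner sum simply vanish.
h = {j: get_h_j_kk(j) for j in range(1, 2 * N + 2)}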
Example #5
 def integrator_inner_loop(k):
     # inner sum over l: (-1)^l * C(N, l) * C(N - j + k - R*M*l, k - R*M*l)
     ll = np.arange(0, np.floor(k / R * M) + 1)
     return np.sum(
         np.power(-1, ll) * ncr(N, ll) *
         ncr(N - j + k - R*M*ll, k - R*M*ll)
     )
Example #6
 def get_training_set_size(n, degree, epsilon, delta):
     # number of monomials of degree at most `degree`: sum of C(n, k) for k = 0..degree
     monomial_count = 0
     for k in range(degree + 1):
         monomial_count += ncr(n, k)
     # required training set size for the Low Degree Algorithm: 4 * M * ln(2 * M / delta) / epsilon
     return int(4 * monomial_count * np.log(2 * monomial_count / delta) /
                epsilon)