def _raw_mul(self, plaintext):
    """Returns the integer E(a * plaintext), where E(a) = ciphertext

    Args:
      plaintext (int): number by which to multiply the `EncryptedNumber`.
        *plaintext* is typically an encoding.
        0 <= *plaintext* < :attr:`~PaillierPublicKey.n`

    Returns:
      int: Encryption of the product of `self` and the scalar encoded in
        *plaintext*.

    Raises:
      TypeError: if *plaintext* is not an int.
      ValueError: if *plaintext* is not between 0 and
        :attr:`PaillierPublicKey.n`.
    """
    if not isinstance(plaintext, int):
        raise TypeError('Expected plaintext to be int, not %s' % type(plaintext))

    if plaintext < 0 or plaintext >= self.public_key.n:
        raise ValueError('Scalar out of bounds: %i' % plaintext)

    if self.public_key.n - self.public_key.max_int <= plaintext:
        # Very large plaintext, play a sneaky trick using inverses
        neg_c = invert(self.ciphertext(False), self.public_key.nsquare)
        neg_scalar = self.public_key.n - plaintext
        return powmod(neg_c, neg_scalar, self.public_key.nsquare)
    else:
        return powmod(self.ciphertext(False), plaintext, self.public_key.nsquare)
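# A minimal, self-contained sketch of the identity behind _raw_mul: in Paillier,
# E(a)^k mod n^2 decrypts to a*k mod n, so multiplying by a scalar is just modular
# exponentiation of the ciphertext. The toy key (p=5, q=7, g=n+1) and the helper
# names below are illustrative only and are not part of the class above.
def _toy_scalar_mul_demo():
    p, q = 5, 7
    n, nsquare = p * q, (p * q) ** 2
    lam = 12                                   # lcm(p - 1, q - 1)

    def L(u):                                  # Paillier L-function
        return (u - 1) // n

    mu = pow(L(pow(n + 1, lam, nsquare)), -1, n)

    def encrypt(m, r):
        return (pow(n + 1, m, nsquare) * pow(r, n, nsquare)) % nsquare

    def decrypt(c):
        return (L(pow(c, lam, nsquare)) * mu) % n

    a, k = 4, 3
    c = encrypt(a, 2)
    c_mul = pow(c, k, nsquare)                 # the core of _raw_mul
    assert decrypt(c_mul) == (a * k) % n

_toy_scalar_mul_demo()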
def __truediv__(a, b):
    """Homomorphically divide a Paillier ciphertext by an integer

    Note that it is not possible to perform this operation between two
    Paillier ciphertexts. This is because the Paillier cryptosystem is only
    partially homomorphic, and not fully homomorphic. For an implementation
    of a fully homomorphic cryptosystem, search for TFHE.

    Also note that this is an exact division modulo `n`. Thus, the result is
    itself an integer `q` such that `q × b = a mod n`.

    Arguments:
        a (PaillierCiphertext): left operand
        b (int): right operand

    Returns:
        PaillierCiphertext: decrypting this ciphertext should yield the
            division modulo `n` of the value obtained by decrypting the
            ciphertext `a` by the integer `b`
    """
    if isinstance(b, PaillierCiphertext):
        raise NotImplementedError('Have a look at TFHE ;-)')
    pk = a.public_key
    if b not in pk.inverts:
        pk.inverts[b] = util.invert(b, pk.n)
    return a * pk.inverts[b]
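# A plain-integer sketch of the "exact division modulo n" described above: the quotient
# q = a * b^{-1} mod n satisfies q * b ≡ a (mod n) even when b does not divide a over the
# integers. pow(b, -1, n) stands in for util.invert(b, n), which is assumed to compute
# the same modular inverse.
n = 101                          # illustrative modulus, coprime to b
a, b = 7, 3
q = (a * pow(b, -1, n)) % n      # what decrypting (a / b) would yield
assert (q * b) % n == a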
def __init__(self, p, q, g):
    """Constructor

    Arguments:
        p (int): first secret prime factor of the modulus `n = p * q`
        q (int): second secret prime factor of the modulus `n = p * q`
        g (int): public generator, passed on to `PaillierPublicKey`
    """
    self.p = p
    self.q = q
    self.public_key = pk = PaillierPublicKey(p * q, g)

    # pre-computations
    self.hp = util.invert(pk.L(util.powmod(pk.g, p - 1, p * p), p), p)
    self.hq = util.invert(pk.L(util.powmod(pk.g, q - 1, q * q), q), q)
def detect_edges_gradient(image_path, alpha):
    """
    Saves a new image that presents the non-directional edges in the image
    saved at the given |image_path|. This method uses the gradient-based
    method for edge detection. |alpha|, a number greater than 0, is a
    parameter that determines which pixels to consider edges. The higher the
    value of |alpha|, the higher the gradient has to be for a pixel to be
    selected.
    """
    print '\tcomputing 2D signal from image path'
    signal = image_to_two_D_signal(image_path)
    print '\tcomputing horizontal gradient'
    horizontal_gradient = clipped_fft_convolve(signal, HORIZONTAL_EDGE_FILTER)
    print '\tcomputing vertical gradient'
    vertical_gradient = clipped_fft_convolve(signal, VERTICAL_EDGE_FILTER)
    print '\tcomputing non-directional gradient'
    non_directional_gradient = Two_D_Signal(
        {key: sqrt(horizontal_gradient[key] ** 2 + vertical_gradient[key] ** 2)
         for key in horizontal_gradient.values})
    print '\tfinding 95th percentile gradient magnitude'
    cutoff_abs = scoreatpercentile(non_directional_gradient.non_zero_values(), 95)
    print '\tscaling gradient image with alpha=%s' % alpha
    scale_f = scaler(cutoff_abs, 255, alpha)
    scaled = Two_D_Signal({key: scale_f(non_directional_gradient[key])
                           for key in non_directional_gradient.values})
    print '\tinverting scaled image'
    inverted = invert(scaled, 255)
    print '\tsaving result image'
    new_image_path = join(strip_dir(image_path), '%s_gradient_edges.%s' % tuple(
        strip_file_name(image_path).split('.')))
    two_D_signal_to_image(inverted, new_image_path)
    print '\tdone'
    return new_image_path
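# Hedged sketch of the non-directional gradient step above, using plain numpy arrays
# instead of Two_D_Signal. The simple [-1, 1] difference filters stand in for the
# HORIZONTAL_EDGE_FILTER / VERTICAL_EDGE_FILTER constants, which are not shown here.
import numpy as np

img = np.array([[0, 0, 9, 9],
                [0, 0, 9, 9],
                [0, 0, 9, 9]], dtype=float)
gx = np.zeros_like(img)
gy = np.zeros_like(img)
gx[:, 1:] = img[:, 1:] - img[:, :-1]      # horizontal differences
gy[1:, :] = img[1:, :] - img[:-1, :]      # vertical differences
grad = np.sqrt(gx ** 2 + gy ** 2)         # non-directional gradient magnitude
# the vertical edge between columns 1 and 2 now carries the largest magnitude
assert grad.max() == grad[0, 2]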
def expectation_maximization(N, M, Psi):
    """
    The expectation maximization method (EM) from Dong et al., 2013.
    It iteratively estimates the value probabilities of objects and the
    accuracies of sources until convergence is reached.
    :param N: number of sources
    :param M: number of objects
    :param Psi: observations indexed by object; Psi[obj] is an iterable of
        (source, value) pairs
    :return: (source accuracies, value probabilities per object)
    """
    inv_Psi = invert(N, M, Psi)
    # convergence eps
    eps = 0.001
    iter_max = 100
    # init accuracies
    A = [np.random.uniform(0.7, 1.0) for s in range(N)]
    iter = 0
    while iter != iter_max:
        # E-step
        p = []
        for obj in range(M):
            # a pass to detect all values of an object
            C = defaultdict(float)
            for s, val in Psi[obj]:
                C[val] = 0.0
            # total number of values
            V = len(C)
            # a pass to compute value confidences
            for s, val in Psi[obj]:
                for v in C.keys():
                    if v == val:
                        if A[s] == 0.:
                            A[s] = 0.5
                        C[v] += math.log(A[s])
                    else:
                        if A[s] == 1.:
                            A[s] = 0.95
                        C[v] += math.log((1 - A[s]) / (V - 1))
            # compute probs
            # normalize
            norm = 0.0
            for val in C.keys():
                norm += math.exp(C[val])
            for val in C.keys():
                C[val] = math.exp(C[val]) / norm
            p.append(C)
        # M-step
        A_new = [np.average([p[obj][val] for obj, val in x]) for x in inv_Psi]
        # convergence check
        if sum(abs(np.subtract(A, A_new))) < eps:
            A = A_new
            break
        else:
            A = A_new
        iter += 1

    return A, p
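# Usage sketch for expectation_maximization. The observation matrix Psi is indexed by
# object: Psi[obj] is a list of (source, value) claims. The invert(N, M, Psi) helper
# used above is not shown; judging from how inv_Psi is consumed, it is assumed to
# regroup the same claims by source, roughly as sketched here.
def invert_psi_sketch(N, M, Psi):
    inv = [[] for _ in range(N)]
    for obj in range(M):
        for s, val in Psi[obj]:
            inv[s].append((obj, val))
    return inv

# three sources (0, 1, 2) claim values for two objects
Psi_example = [
    [(0, 'red'), (1, 'red'), (2, 'blue')],   # object 0
    [(0, 'oak'), (1, 'oak'), (2, 'oak')],    # object 1
]
# A, p = expectation_maximization(N=3, M=2, Psi=Psi_example)
# A[s]        -> estimated accuracy of source s
# p[obj][val] -> estimated probability that `val` is the true value of `obj`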
def move(self, direction):
    def move_row_left(row):
        def tighten(row):
            new_row = [i for i in row if i != 0]
            new_row += [0 for i in range(len(row) - len(new_row))]
            return new_row

        def merge(row):
            pair = False
            new_row = []
            for i in range(len(row)):
                if pair:
                    new_row.append(2 * row[i])
                    self.score += 2 * row[i]
                    pair = False
                else:
                    if i + 1 < len(row) and row[i] == row[i + 1]:
                        pair = True
                        new_row.append(0)
                    else:
                        new_row.append(row[i])
            assert len(new_row) == len(row)
            return new_row

        return tighten(merge(tighten(row)))

    moves = {}
    moves['Left'] = lambda field: \
        [move_row_left(row) for row in field]
    moves['Right'] = lambda field: \
        invert(moves['Left'](invert(field)))
    moves['Up'] = lambda field: \
        transpose(moves['Left'](transpose(field)))
    moves['Down'] = lambda field: \
        transpose(moves['Right'](transpose(field)))

    if direction in moves:
        if self.move_is_possible(direction):
            self.field = moves[direction](self.field)
            self.spawn()
            return True
        else:
            return False
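# The moves above reduce every direction to 'Left' via two grid reflections. The
# invert / transpose helpers are not shown here; they are assumed to be the usual
# row reversal and matrix transpose, sketched below with a small check.
def invert(field):
    return [row[::-1] for row in field]        # mirror each row (maps Right onto Left)

def transpose(field):
    return [list(row) for row in zip(*field)]  # swap rows and columns (maps Up onto Left)

field = [[2, 0],
         [4, 4]]
assert invert(field) == [[0, 2], [4, 4]]
assert transpose(field) == [[2, 4], [0, 4]]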
def share_paillier_keypair(pk, sk, n_shares):
    """Share an existing keypair for the Paillier cryptosystem

    Arguments:
        pk (PaillierPublicKey): public part of the keypair to be shared
        sk (PaillierSecretKey): secret part of the keypair to be shared
        n_shares (int): the number of shares into which to split the keypair

    Returns:
        tuple: pair of two elements, usually named respectively `pk_shares`
            (`list` of `PaillierPublicKeyShare`) and `sk_shares` (`list` of
            `PaillierSecretKeyShare`).

            When used together, the secret key shares (`sk_shares`) allow
            decryption of ciphertexts generated using the given public key
            (but not another), via the method `assemble_decryption_shares()`
            from `PaillierPublicKeyShare`. The public key shares
            (`pk_shares`) can be used to verify that each secret key share
            was used correctly (usually one share is given to each party,
            and decryption implies that each party correctly processed the
            ciphertext using their secret key share).
    """
    m = (sk.p - 1) * (sk.q - 1)
    exponent = util.invert(pk.n, m)

    # the verification base must generate the quadratic residues, which happens
    # with overwhelming probability for a random square
    verification_base = random.SystemRandom().randrange(pk.nsquare)**2 % pk.n

    # split the secret exponent into the required number of shares
    key_shares = [
        random.SystemRandom().randrange(m) for _ in range(n_shares - 1)
    ]
    key_shares.append((exponent - sum(key_shares)) % m)

    # compute corresponding verification elements
    verifications = [
        util.powmod(verification_base, key_share, pk.nsquare)
        for key_share in key_shares
    ]

    # create public and private key shares
    pk_shares = [
        PaillierPublicKeyShare(pk, verification_base, verification)
        for verification in verifications
    ]
    sk_shares = [
        PaillierSecretKeyShare(pk, verification_base, key_share)
        for key_share in key_shares
    ]
    return pk_shares, sk_shares
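# Plain-integer sketch of why the additive shares above recombine: raising a ciphertext
# to each key share and multiplying the partial results is the same as raising it to the
# full secret exponent, because c^s0 * c^s1 * ... = c^(s0 + s1 + ...) (mod n^2).
# All numbers below are illustrative only.
import random

nsquare = 35 ** 2                              # toy n^2
exponent = 29                                  # toy secret exponent
n_shares = 3
shares = [random.randrange(10) for _ in range(n_shares - 1)]
shares.append(exponent - sum(shares))          # shares now sum exactly to the exponent

c = 88                                         # some ciphertext in Z_{n^2}
combined = 1
for share in shares:
    combined = (combined * pow(c, share, nsquare)) % nsquare
assert combined == pow(c, exponent, nsquare)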
def __init__(self, public_key, p, q):
    if not p * q == public_key.n:
        raise ValueError('given public key does not match the given p and q.')
    if p == q:
        # check that p and q are different, otherwise we can't compute p^-1 mod q
        raise ValueError('p and q have to be different')
    self.public_key = public_key
    if q < p:  # ensure that p < q
        self.p = q
        self.q = p
    else:
        self.p = p
        self.q = q
    self.psquare = self.p * self.p
    self.qsquare = self.q * self.q
    self.p_inverse = invert(self.p, self.q)
    self.hp = self.h_function(self.p, self.psquare)
    self.hq = self.h_function(self.q, self.qsquare)
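# The p_inverse precomputed above is what enables CRT recombination at decryption time:
# from the two residues m mod p and m mod q, the plaintext mod n is rebuilt as
# m_p + (((m_q - m_p) * p^{-1} mod q) * p). A self-contained toy check:
p, q = 5, 7
n = p * q
p_inverse = pow(p, -1, q)        # the same quantity as invert(self.p, self.q) above
m = 23                           # plaintext we pretend came out of the two CRT branches
m_p, m_q = m % p, m % q
recombined = (m_p + (((m_q - m_p) * p_inverse) % q) * p) % n
assert recombined == m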
def raw_encrypt(self, plaintext, r_value=None):
    """Paillier encryption of a positive integer plaintext < :attr:`n`.

    You probably should be using :meth:`encrypt` instead, because it
    handles positive and negative ints and floats.

    Args:
      plaintext (int): a positive integer < :attr:`n` to be Paillier
        encrypted. Typically this is an encoding of the actual number
        you want to encrypt.
      r_value (int): obfuscator for the ciphertext; by default (i.e.
        r_value is None), a random value is used.

    Returns:
      int: Paillier encryption of plaintext.

    Raises:
      TypeError: if plaintext is not an int or mpz.
    """
    if not isinstance(plaintext, int) and not isinstance(
            plaintext, type(mpz(1))) and not isinstance(plaintext, numpy.int64):
        raise TypeError('Expected int type plaintext but got: %s' % type(plaintext))

    if self.n - self.max_int <= plaintext < self.n:
        # Very large plaintext, take a sneaky shortcut using inverses
        neg_plaintext = self.n - plaintext  # = abs(plaintext - n)
        neg_ciphertext = (self.n * neg_plaintext + 1) % self.nsquare
        nude_ciphertext = invert(neg_ciphertext, self.nsquare)
    else:
        # we chose g = n + 1, so that we can exploit the fact that
        # (n+1)^plaintext = n*plaintext + 1 mod n^2
        nude_ciphertext = (self.n * plaintext + 1) % self.nsquare

    # r = r_value or self.get_random_lt_n()
    # obfuscator = powmod(r, self.n, self.nsquare)
    r = r_value or powmod(self.get_random_lt_n(), self.n, self.nsquare)
    # Pass the precomputed obfuscator (r_value is expected to be r^n mod n^2)
    obfuscator = r

    return (nude_ciphertext * obfuscator) % self.nsquare
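# Quick checks of the two facts exploited above, with toy numbers only: with g = n + 1,
# encryption needs no exponentiation because (n+1)^m = 1 + n*m (mod n^2); and for a very
# large plaintext m, the encryption of m is the modular inverse of the encryption of
# n - m, since (1 + n*m) * (1 + n*(n - m)) ≡ 1 (mod n^2).
n = 35
m = 4
assert pow(n + 1, m, n * n) == (1 + n * m) % (n * n)

big = n - 4                              # a "very large" plaintext close to n
assert (1 + n * big) % (n * n) == pow((1 + n * (n - big)) % (n * n), -1, n * n)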
def detect_edges_laplacian(image_path, alpha, variance_filter=True):
    """
    Saves a new image that presents the edges in the image saved at the
    given |image_path|. This method uses the Laplacian-based method for edge
    detection. |alpha|, a number greater than 0, is a parameter that
    determines which pixels to consider edges. The higher the value of
    |alpha|, the higher the laplacian has to be for a pixel to be selected.
    If |variance_filter| is set, only pixels with high variance have a
    chance at all of being selected as edges.
    """
    print '\tcomputing 2D signal from image path'
    signal = image_to_two_D_signal(image_path)
    print '\tcomputing laplacian'
    laplacian = clipped_fft_convolve(signal, LAPLACIAN_FILTER)
    print '\tcomputing abs'
    abs_laplacian = abs(laplacian)
    print '\tfinding cutoff abs: 95th percentile abs'
    cutoff_abs = scoreatpercentile(abs_laplacian.non_zero_values(), 95)
    print '\tscaling abs image with alpha=%s' % alpha
    scale_f = scaler(cutoff_abs, 255, alpha)
    scaled = Two_D_Signal({key: scale_f(abs_laplacian[key])
                           for key in abs_laplacian.values})
    print '\tinverting scaled image'
    inverted = invert(scaled, 255)
    if variance_filter:
        print '\tcomputing variance'
        var = var_signal(signal, 2)
        print '\tfinding cutoff variance'
        cutoff_var = scoreatpercentile(var.non_zero_values(), 70)
        print '\tupdating result image with cutoff variance'
        for n1 in xrange(inverted.n1_min, inverted.n1_max + 1):
            for n2 in xrange(inverted.n2_min, inverted.n2_max + 1):
                if var[n1, n2] < cutoff_var:
                    inverted.set_value(n1, n2, 255)
    print '\tsaving result image'
    new_image_path = join(strip_dir(image_path), '%s_laplacian_edges.%s' % tuple(
        strip_file_name(image_path).split('.')))
    two_D_signal_to_image(inverted, new_image_path)
    print '\tdone'
    return new_image_path
def move_is_possible(self, direction):
    def row_is_left_movable(row):
        def change(i):
            if 0 == row[i] and 0 != row[i + 1]:
                return True
            if 0 != row[i] and row[i + 1] == row[i]:
                return True
            return False
        return any(change(i) for i in range(len(row) - 1))

    check = {}
    check['Left'] = lambda field: \
        any(row_is_left_movable(row) for row in field)
    check['Right'] = lambda field: \
        check['Left'](invert(field))
    check['Up'] = lambda field: \
        check['Left'](transpose(field))
    check['Down'] = lambda field: \
        check['Right'](transpose(field))

    if direction in check:
        return check[direction](self.field)
    else:
        return False
import math
import sys

from util import invert


def encrypt(m, e, n):
    return pow(m, e, n)


def decrypt(c, d, n):
    return pow(c, d, n)


if __name__ == '__main__':
    p = 9989999999933
    q = 9999999999971
    n = p * q
    l = (p - 1) * (q - 1) // math.gcd(p - 1, q - 1)
    e = 65537
    d = invert(e, l)

    m = int.from_bytes(sys.argv[1].encode('utf-8'), 'little')
    print(m)
    c = encrypt(m, e, n)
    print(decrypt(c, d, n))

    # IND-CCA2 PoC: blind the ciphertext with r^e, decrypt it, then divide out r
    r = 2
    cr = (c * pow(r, e, n)) % n
    mr = decrypt(cr, d, n)
    mprime = (mr * invert(r, n)) % n
    print(mprime)
def h_function(self, x, xsquare):
    """Computes the h-function as defined in Paillier's paper page 12,
    'Decryption using Chinese-remaindering'.
    """
    return invert(
        self.l_function(powmod(self.public_key.g, x - 1, xsquare), x), x)
def sign(hash, k, x, g, p):
    r = pow(g, k, p)
    s = (invert(k, p - 1) * (hash - x * r)) % (p - 1)
    return r, s
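# Sanity-check sketch for the ElGamal signature above: a signature (r, s) on `hash`
# verifies when g^hash ≡ y^r * r^s (mod p), with y = g^x the public key. The small
# parameters below are illustrative only, and pow(k, -1, p - 1) plays the role of
# invert(k, p - 1) so the check stays self-contained.
p, g = 467, 2
x = 127                      # private key
y = pow(g, x, p)             # public key
k = 213                      # per-signature nonce, must be coprime to p - 1
h = 100                      # message hash

r = pow(g, k, p)                               # same r as sign(h, k, x, g, p)
s = (pow(k, -1, p - 1) * (h - x * r)) % (p - 1)  # same s as sign(h, k, x, g, p)
assert pow(g, h, p) == (pow(y, r, p) * pow(r, s, p)) % p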
def mcmc(N, M, Psi, params):
    """
    MCMC for log-likelihood maximum search.
    :param N: number of sources
    :param M: number of objects
    :param Psi: observations indexed by object; Psi[obj] is an iterable of
        (source, value) pairs
    :param params: sampler settings: N_iter, burnin, thin
    :return: (source accuracies, value probabilities per object)
    """
    N_iter = params['N_iter']
    burnin = params['burnin']
    thin = params['thin']
    inv_Psi = invert(N, M, Psi)

    # random init
    A = np.random.uniform(0.7, 1.0, N)

    # MCMC sampling
    sample_size = 0.0
    mcmc_p = [defaultdict(float) for x in range(M)]
    for _iter in range(N_iter):
        # update objects
        p = []
        for obj in range(M):
            # a pass to detect all values of an object
            C = {}
            for s, val in Psi[obj]:
                C[val] = 0.0
            # total number of values
            V = len(C)
            # a pass to compute value confidences
            for s, val in Psi[obj]:
                for v in C.keys():
                    if v == val:
                        C[v] += math.log(A[s])
                    else:
                        C[v] += math.log((1 - A[s]) / (V - 1))
            # compute probs
            # normalize
            norm = 0.0
            for val in C.keys():
                norm += math.exp(C[val])
            for val in C.keys():
                C[val] = math.exp(C[val]) / norm
            p.append(C)

        # draw object values
        O = []
        for x in p:
            if len(x) > 0:
                vals = []
                probs = []
                for val, prob in x.iteritems():
                    vals.append(val)
                    probs.append(prob)
                O.append(vals[np.where(np.random.multinomial(1, probs) == 1)[0][0]])
            else:
                # if there are no values for the object
                O.append(None)

        # update sources
        for source_id in range(N):
            beta_0 = 0
            beta_1 = 0
            for obj, val in inv_Psi[source_id]:
                if val == O[obj]:
                    beta_0 += 1
                else:
                    beta_1 += 1
            A[source_id] = beta(beta_0 + 4, beta_1 + 1)

        if _iter > burnin and _iter % thin == 0:
            sample_size += 1
            for obj in range(M):
                mcmc_p[obj][O[obj]] += 1

    # mcmc output
    for p in mcmc_p:
        for val in p.keys():
            p[val] /= sample_size

    mcmc_A = [0.0 for s in range(N)]
    for s in range(N):
        for obj, val in inv_Psi[s]:
            # TODO take advantage of priors (as in Zhao et al., 2012)
            mcmc_A[s] += mcmc_p[obj][val]
        mcmc_A[s] /= (0.0 + len(inv_Psi[s]))

    return mcmc_A, mcmc_p
def decrypt(c1, c2, x, p):
    c1x = pow(c1, x, p)
    return (c2 * invert(c1x, p)) % p
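# Round-trip sketch for the ElGamal decryption above: a ciphertext (c1, c2) with
# c1 = g^k and c2 = m * y^k decrypts to m because c2 / c1^x = m * g^{xk} / g^{kx}.
# pow(c1x, -1, p) is used here in place of invert(c1x, p); toy parameters only.
p, g = 467, 2
x = 127                      # private key
y = pow(g, x, p)             # public key
m = 331                      # message, 1 <= m < p
k = 57                       # ephemeral key

c1 = pow(g, k, p)
c2 = (m * pow(y, k, p)) % p
# decrypt(c1, c2, x, p) above computes exactly this expression
assert (c2 * pow(pow(c1, x, p), -1, p)) % p == m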