def additive_shares_with_rho(self, x):
    """Return a tuple of a list of tuples (player id, share, rho) and rho.

    Chooses random elements ``x_1, ..., x_n-1`` in field and ``x_n``
    s.t. ``x_n = x - Sum_i=1^n-1 x_i``.

    Chooses random pairs of elements ``rho_1, ..., rho_n in Z_p^2``
    and defines ``rho = Sum_i=1^n rho_i``.

    Returns a pair ``([(player id, x_i, rho_i)], rho)``.
    """
    shares = []
    sum = 0
    rho1 = 0
    rho2 = 0
    for i in xrange(1, self.num_players):
        xi = field(rand.randint(0, field.modulus - 1))
        rhoi1 = field(rand.randint(0, field.modulus - 1))
        rhoi2 = field(rand.randint(0, field.modulus - 1))
        sum += xi
        rho1 += rhoi1
        rho2 += rhoi2
        shares.append((i, xi, (rhoi1, rhoi2)))
    # The last share is fixed so that all shares sum to x.
    xn = field(x) - sum
    rhon1 = field(rand.randint(0, field.modulus - 1))
    rhon2 = field(rand.randint(0, field.modulus - 1))
    shares.append((self.num_players, xn, (rhon1, rhon2)))
    rho1 += rhon1
    rho2 += rhon2
    return shares, (rho1, rho2)
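# A minimal, self-contained sketch (not part of the runtime) of the additive
# sharing used above: n-1 shares are chosen at random and the last share is
# fixed so that all shares sum to the secret.  The field is modelled as plain
# integers mod a small prime; the ``demo_`` names are illustrative only.
import random

def demo_additive_shares(x, num_players, p):
    """Split x into num_players additive shares mod p."""
    shares = [random.randint(0, p - 1) for _ in range(num_players - 1)]
    shares.append((x - sum(shares)) % p)
    return shares

def demo_reconstruct(shares, p):
    return sum(shares) % p

assert demo_reconstruct(demo_additive_shares(42, 5, 101), 101) == 42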
def generate_operation_arguments(self, _):
    # print "Generate operation arguments", self.rt.program_counter
    print "Runtime ready, generating shares"
    self.a_shares = []
    self.b_shares = []
    for i in range(self.count):
        inputter = (i % len(self.rt.players)) + 1
        if inputter == self.rt.id:
            a = rand.randint(0, self.field.modulus - 1)
            b = rand.randint(0, self.field.modulus - 1)
        else:
            a, b = None, None
        self.a_shares.append(self.rt.input([inputter], self.field, a))
        self.b_shares.append(self.rt.input([inputter], self.field, b))
    shares_ready = gather_shares(self.a_shares + self.b_shares)
    return shares_ready
def share(self, inputters, field, number=None):
    """Share *number* additively."""
    assert number is None or self.id in inputters

    results = []
    for peer_id in inputters:
        # Unique program counter per input.
        self.increment_pc()

        if peer_id == self.id:
            a = field(rand.randint(0, field.modulus - 1))
            b = number - a

            results.append(Share(self, a.field, a))
            pc = tuple(self.program_counter)
            self.protocols[self.peer.id].sendShare(pc, b)
        else:
            share = self._expect_share(peer_id, field)
            results.append(share)

    # Unpack a singleton list.
    if len(results) == 1:
        return results[0]
    else:
        return results
def convert_bit_share(self, share, dst_field): """Convert a 0/1 share into *dst_field*.""" l = self.options.security_parameter + \ int(math.ceil(math.log(dst_field.modulus, 2))) # TODO assert field sizes are OK... this_mask = rand.randint(0, (2**l) - 1) # Share large random values in the big field and reduced ones # in the small... src_shares = self.prss_share(self.players, share.field, this_mask) dst_shares = self.prss_share(self.players, dst_field, this_mask) tmp = reduce(self.add, src_shares, share) # We open tmp and convert the value into a field element from # the dst_field. tmp = self.open(tmp) tmp.addCallback(lambda i: dst_field(i.value)) # Must update field on Share when we change the field of the # the value within tmp.field = dst_field full_mask = reduce(self.add, dst_shares) return tmp - full_mask
def prepare_verification(rs_serialized, results):
    # Repr/eval deserialization.
    rs = [eval(rss) for rss in rs_serialized]

    for i in xrange(n):
        a = triple_candidates[i]
        b = triple_candidates[i + 2 * n]
        c = triple_candidates[i + 4 * n]
        x = triple_candidates[i + n]
        y = triple_candidates[i + 3 * n]
        z = triple_candidates[i + 5 * n]

        # Hash all received random values to agree on a single
        # random value for each triple.
        hash = hashlib.sha1()
        for rp in rs:
            hash.update(str(rp[i]))
        # TODO: We should use a secure random generator here.
        rand = Random(hash.digest())
        r = self.Zp(rand.randint(0, self.p - 1))

        l = self.runtime._cmul(r, x, self.Zp)
        m = self.runtime._cmul(r, y, self.Zp)
        k = self.runtime._cmul(r * r, z, self.Zp)
        v = c - self.runtime._basic_multiplication(a, b, l, m, k)
        v = self.runtime.open(v)
        self.runtime.schedule_callback(v, verify, a, b, c)
        v.addCallbacks(results[i].callback, results[i].errback)
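# Plaintext sketch (illustrative only, not the protocol code) of the identity
# that ``_basic_multiplication`` relies on.  With a masking triple (x, y, z),
# z = x*y, and the public differences d = a - x and e = b - y,
#     a*b = d*e + d*y + e*x + z   (mod p),
# so opening v = c - a*b reveals 0 exactly when (a, b, c) is a valid triple.
# The code above additionally scales the masking triple by a random r; this
# sketch omits that and works on plain integers mod a small prime.
def demo_triple_check(a, b, c, x, y, p):
    z = (x * y) % p
    d = (a - x) % p                       # opened value
    e = (b - y) % p                       # opened value
    ab = (d * e + d * y + e * x + z) % p  # Beaver-style recombination
    return (c - ab) % p                   # 0 iff c == a*b mod p

p = 101
assert demo_triple_check(7, 9, (7 * 9) % p, 33, 58, p) == 0
assert demo_triple_check(7, 9, 5, 33, 58, p) != 0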
def generate_key():
    # TODO: is a 40 byte hex string as good as a 20 byte binary
    # string when it is used for SHA1 hashing? It ought to be,
    # since they contain the same entropy.

    # A SHA1 hash is 160 bits.
    return hex(rand.randint(0, 2**160))
def _send_some_bytes(self):
    assert self._pending.called
    assert not self._pending.cancelled

    # Check that there is still something to send:
    if not self._buffer:
        # If the buffer is empty and we have been asked to
        # disconnect, then do so.
        if self._will_disconnect:
            self._cancel_pending()
            # Empty the buffer.
            bytes = self._buffer
            self._buffer = ''
            # Signal the disconnect to those waiting on the queue.
            self.q.disconnect = True
            self.q.put(bytes)
        # Return without scheduling another write.
        return

    # Cut the buffer in two at a random place and write a chunk to
    # the protocol:
    cut = rand.randint(0, len(self._buffer))
    chunk, self._buffer = self._buffer[:cut], self._buffer[cut:]

    # Schedule another go after a random delay.
    self._schedule_write()

    # Finally put the chunk into the queue.
    self.q.put(chunk)
def protocol(rt):
    print "Testing online requirements for Toft07 comparison"
    l = rt.options.bit_length
    k = rt.options.security_parameter
    assert Zp.modulus > 2**(l + k)
    print "l = %d" % l
    print "k = %d" % k

    shares = []
    for n in range(2 * count // len(players) + 1):
        input = rand.randint(0, 2**l - 1)
        shares.extend(rt.shamir_share([1, 2, 3], Zp, input))

    # We want to measure the time for count comparisons, so we need
    # 2*count input numbers.
    shares = shares[:2 * count]

    preproc = []
    pseudoPreproc = []
    for i in range(count):
        thisPreproc = rt.greater_than_equal_preproc(Zp, smallField=Zq)
        preproc.append(thisPreproc)
        pseudoPreproc += thisPreproc[2:-1]
        pseudoPreproc += thisPreproc[-1]

        # Print status as we go along.
        # TODO: why does this not work?
        def printDonePreproc(_, i):
            print "Done preprocessing %d" % i
            return _
        tmp = DeferredList(thisPreproc[2:-1])
        tmp.addCallback(printDonePreproc, i)

    def run_test(_):
        print "Preprocessing done..."
        print "Making %d comparisons" % count
        record_start()

        bits = []
        while len(shares) > 1:
            a = shares.pop(0)
            b = shares.pop(0)
            c = rt.greater_than_equal_online(a, b, preproc.pop(), Zp)
            bits.append(c)

        stop = DeferredList(bits)
        stop.addCallback(record_stop)
        stop.addCallback(finish)

        # TODO: it would be nice if the results were checked
        # automatically, but it needs to be done without adding
        # overhead to the benchmark.

    # We want to wait until all numbers have been shared and
    # preprocessing has been performed.
    dl = gatherResults(shares + pseudoPreproc)
    dl.addCallback(run_test)
def random_share(self, field):
    """Generate a share of a random element in *field*.

    To generate a share of a random element ``r in Z_p``, party
    ``P_i`` chooses at random ``r_i, rho_ri in Z_p x (Z_p)^2`` and
    broadcasts ``C_r^i = Com_ck(r_i, rho_ri)``.

    Every party computes ``C_r = PRODUCT_i=1^n C_r^i = Com_ck(r, rho_r)``,
    where ``r = SUM_i=1^n r_i`` and ``rho_r = SUM_i=1^n rho_ri``.

    Party ``P_i`` sets ``[r]_i = (r_i, rho_ri, C_r)``.
    """
    self.increment_pc()

    # P_i chooses at random r_i, rho_ri in Z_p x (Z_p)^2.
    ri = field(rand.randint(0, field.modulus - 1))
    rhoi1 = field(rand.randint(0, field.modulus - 1))
    rhoi2 = field(rand.randint(0, field.modulus - 1))

    # Compute C_r^i = Com_ck(r_i, rho_ri).
    Cri = commitment.commit(ri.value, rhoi1.value, rhoi2.value)

    # Broadcast C_r^i.
    sls = gatherResults(self.broadcast(self.players.keys(),
                                       self.players.keys(),
                                       repr(Cri)))

    def compute_commitment(ls):
        Cr = reduce(operator.mul, ls)
        return OrlandiShare(self, field, ri, (rhoi1, rhoi2), Cr)

    def deserialize(ls):
        return [commitment.deserialize(x) for x in ls]

    sls.addCallbacks(deserialize, self.error_handler)
    sls.addCallbacks(compute_commitment, self.error_handler)

    s = Share(self, field)
    # We add the result to the chains in triple.
    sls.chainDeferred(s)

    # Do the actual communication.
    self.activate_reactor()

    return s
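# Toy sketch (not VIFF's ``commitment`` module) of the homomorphic property
# the code above relies on: the product of everybody's commitments is itself a
# commitment to the sum of the committed values and the sum of the randomness.
# Here Com(m, s, t) = g^m * h1^s * h2^t mod p with arbitrary toy parameters;
# all names and constants below are illustrative only.
P, G, H1, H2 = 1000003, 2, 3, 5

def demo_commit(m, s, t):
    return (pow(G, m, P) * pow(H1, s, P) * pow(H2, t, P)) % P

c1 = demo_commit(10, 4, 7)
c2 = demo_commit(32, 9, 1)
# Multiplying commitments adds both the messages and the randomness.
assert (c1 * c2) % P == demo_commit(10 + 32, 4 + 9, 7 + 1)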
def __init__(self, runtime):
    # Save the Runtime for later use.
    self.runtime = runtime

    # This is the value we will use in the protocol.
    self.millions = rand.randint(1, 200)
    print "I am Millionaire %d and I am worth %d millions." \
        % (runtime.id, self.millions)

    # For the comparison protocol to work, we need a field modulus
    # bigger than 2**(l+1) + 2**(l+k+1), where the bit length of
    # the input numbers is l and k is the security parameter.
    # Furthermore, the prime must be a Blum prime (a prime p such
    # that p % 4 == 3 holds). The find_prime function lets us find
    # a suitable prime.
    l = runtime.options.bit_length
    k = runtime.options.security_parameter
    Zp = GF(find_prime(2**(l + 1) + 2**(l + k + 1), blum=True))

    # We must secret share our input with the other parties. They
    # will do the same and we end up with three variables.
    m1, m2, m3 = runtime.shamir_share([1, 2, 3], Zp, self.millions)

    # Now that everybody has secret shared their inputs we can
    # compare them. We compare the worth of the first millionaire
    # with the two others, and compare those two millionaires with
    # each other.
    m1_ge_m2 = m1 >= m2
    m1_ge_m3 = m1 >= m3
    m2_ge_m3 = m2 >= m3

    # The results are secret shared, so we must open them before
    # we can do anything useful with them.
    open_m1_ge_m2 = runtime.open(m1_ge_m2)
    open_m1_ge_m3 = runtime.open(m1_ge_m3)
    open_m2_ge_m3 = runtime.open(m2_ge_m3)

    # We will now gather the results and call the
    # self.results_ready method when they have all been received.
    results = gather_shares([open_m1_ge_m2, open_m1_ge_m3, open_m2_ge_m3])
    results.addCallback(self.results_ready)

    # We can add more callbacks to the callback chain in results.
    # These are called in sequence when self.results_ready is
    # finished. The first callback acts like a barrier and makes
    # all players wait on each other.
    #
    # The callbacks are always called with an argument equal to
    # the return value of the preceding callback. We do not need
    # the argument (which is None since self.results_ready does
    # not return anything), so we throw it away using a lambda
    # expression which ignores its first argument.
    runtime.schedule_callback(results, lambda _: runtime.synchronize())

    # The next callback shuts the runtime down, killing the
    # connections between the players.
    runtime.schedule_callback(results, lambda _: runtime.shutdown())
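# A small sketch (not part of the example) of the two conditions the comment
# above places on the prime: it must exceed 2**(l+1) + 2**(l+k+1) and satisfy
# p % 4 == 3 (a Blum prime).  The ``demo_`` name is illustrative; primality
# itself is left to find_prime and is not tested here.
def demo_is_suitable_prime(p, l, k):
    big_enough = p > 2**(l + 1) + 2**(l + k + 1)
    blum = (p % 4 == 3)
    return big_enough and blum

# For l = 4 and k = 8 the bound is 2**5 + 2**13 = 8224; the prime 8231
# satisfies 8231 % 4 == 3 and exceeds the bound.
assert demo_is_suitable_prime(8231, l=4, k=8)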
def make_array(self):
    array = []
    for i in range(options.size):
        inputter = (i % 3) + 1
        if inputter == self.rt.id:
            number = rand.randint(1, options.max)
            print "Sharing array[%d] = %s" % (i, number)
        else:
            number = None
        share = self.rt.shamir_share([inputter], Zp, number)
        array.append(share)
    return array
def single_share_random(self, T, degree, field):
    """Share a random secret.

    The guarantee is that a number of shares are made and out of
    those, the *T* that are returned by this method will be correct
    sharings of a random number using *degree* as the polynomial
    degree.
    """
    si = rand.randint(0, field.modulus - 1)
    svec, rvec = self._share_single(si, degree, field)
    result = gather_shares(svec[T:])
    self.schedule_callback(result, self._exchange_single,
                           rvec, T, field, degree)
    return result
def convert_bit_share(self, share, dst_field): """Convert a 0/1 share into dst_field.""" bit = rand.randint(0, 1) dst_shares = self.prss_share(self.players, dst_field, bit) src_shares = self.prss_share(self.players, share.field, bit) # TODO: Using a parallel reduce below seems to be slower than # using the built-in reduce. # We open tmp and convert the value into a field element from # the dst_field. tmp = self.open(reduce(self.xor, src_shares, share)) tmp.addCallback(lambda i: dst_field(i.value)) # Must update field on Share when we change the field of the # the value within tmp.field = dst_field return reduce(self.xor, dst_shares, tmp)
def double_share_random(self, T, d1, d2, field):
    """Double-share a random secret using two polynomials.

    The guarantee is that a number of shares are made and out of
    those, the *T* that are returned by this method will be correct
    double-sharings of a random number using *d1* and *d2* as the
    polynomial degrees.
    """
    si = rand.randint(0, field.modulus - 1)
    svec1, rvec1 = self._share_single(si, d1, field)
    svec2, rvec2 = self._share_single(si, d2, field)
    result = gather_shares([gather_shares(svec1[T:]),
                            gather_shares(svec2[T:])])
    self.schedule_callback(result, self._exchange_double,
                           rvec1, rvec2, T, field, d1, d2)
    return result
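# Self-contained sketch (not the runtime's Shamir code) of a "double sharing":
# the same secret is shared twice, once with degree d1 and once with degree
# d2, and both sharings interpolate back to the same value at x = 0.  All
# ``demo_`` names are illustrative only.
import random

def demo_shamir_share(secret, degree, n, p):
    coeffs = [secret] + [random.randint(0, p - 1) for _ in range(degree)]
    return [(x, sum(c * pow(x, j, p) for j, c in enumerate(coeffs)) % p)
            for x in range(1, n + 1)]

def demo_recombine(points, p):
    # Lagrange interpolation at x = 0.
    secret = 0
    for i, (xi, yi) in enumerate(points):
        num, den = 1, 1
        for j, (xj, _) in enumerate(points):
            if i != j:
                num = (num * xj) % p
                den = (den * (xj - xi)) % p
        secret = (secret + yi * num * pow(den, p - 2, p)) % p
    return secret

p, secret = 97, 42
low = demo_shamir_share(secret, 1, 5, p)    # degree d1 = 1
high = demo_shamir_share(secret, 3, 5, p)   # degree d2 = 3
assert demo_recombine(low[:2], p) == demo_recombine(high[:4], p) == secret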
def generate_keys(bit_length):
    # Make an RSA modulus n.
    p = find_random_prime(bit_length / 2)
    while True:
        q = find_random_prime(bit_length / 2)
        if p != q:
            break
    n = p * q
    nsq = n * n

    # Calculate Carmichael's function.
    lm = gmpy.lcm(p - 1, q - 1)

    # Generate a generator g in B.
    while True:
        g = rand.randint(1, long(nsq))
        if gmpy.gcd(L(pow(g, lm, nsq), n), n) == 1:
            break

    return {'n': n, 'g': g}, {'n': n, 'g': g, 'lm': lm}
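# A compact, self-contained sketch (toy parameters, no gmpy) of the Paillier
# scheme that generate_keys sets up: L(u) = (u - 1) // n, lm = lcm(p-1, q-1),
# Enc(m, r) = g^m * r^n mod n^2 and Dec(c) = L(c^lm mod n^2) * mu mod n with
# mu = L(g^lm mod n^2)^-1 mod n.  All ``demo_`` names are illustrative only.
import random

def demo_gcd(a, b):
    while b:
        a, b = b, a % b
    return a

def demo_modinv(a, m):
    # Extended Euclid: return x with (a * x) % m == 1.
    old_r, r, old_s, s = a % m, m, 1, 0
    while r:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_s, s = s, old_s - q * s
    return old_s % m

def demo_L(u, n):
    return (u - 1) // n

def demo_keygen(p, q):
    n = p * q
    nsq = n * n
    lm = (p - 1) * (q - 1) // demo_gcd(p - 1, q - 1)   # lcm(p-1, q-1)
    while True:
        g = random.randint(1, nsq - 1)
        if demo_gcd(g, n) == 1 and demo_gcd(demo_L(pow(g, lm, nsq), n), n) == 1:
            break
    return {'n': n, 'g': g}, {'n': n, 'g': g, 'lm': lm}

def demo_encrypt(m, pub):
    n = pub['n']
    nsq = n * n
    r = random.randint(1, n - 1)
    while demo_gcd(r, n) != 1:
        r = random.randint(1, n - 1)
    return (pow(pub['g'], m, nsq) * pow(r, n, nsq)) % nsq

def demo_decrypt(c, sec):
    n, lm = sec['n'], sec['lm']
    nsq = n * n
    mu = demo_modinv(demo_L(pow(sec['g'], lm, nsq), n), n)
    return (demo_L(pow(c, lm, nsq), n) * mu) % n

pub, sec = demo_keygen(293, 433)   # toy primes, far too small in practice
assert demo_decrypt(demo_encrypt(17, pub), sec) == 17
# The scheme is additively homomorphic: Enc(a) * Enc(b) decrypts to a + b.
ca, cb = demo_encrypt(20, pub), demo_encrypt(22, pub)
assert demo_decrypt((ca * cb) % (pub['n'] ** 2), sec) == 42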
def finish_mul((a, b)):
    pc = tuple(self.program_counter)
    send_data = self.protocols[self.peer.id].sendData

    if hash(pc) % 2 == self.id % 2:
        # We play the role of P1.
        a1, b1 = a, b
        enc_a1 = encrypt(a1.value, self.player.pubkey)
        enc_b1 = encrypt(b1.value, self.player.pubkey)
        send_data(pc, PAILLIER, str(enc_a1))
        send_data(pc, PAILLIER, str(enc_b1))

        enc_c1 = Share(self, field)
        self._expect_data(self.peer.id, PAILLIER, enc_c1)
        c1 = enc_c1.addCallback(decrypt, self.player.seckey)
        c1.addCallback(lambda c: long(c) + a1 * b1)
        return c1
    else:
        # We play the role of P2.
        a2, b2 = a, b
        enc_a1 = Deferred()
        self._expect_data(self.peer.id, PAILLIER, enc_a1)
        enc_a1.addCallback(long)
        enc_b1 = Deferred()
        self._expect_data(self.peer.id, PAILLIER, enc_b1)
        enc_b1.addCallback(long)

        nsq = self.peer.pubkey['n']**2
        # Calculate a1 * b2 and b1 * a2 inside the encryption.
        enc_a1_b2 = enc_a1.addCallback(pow, b2.value, nsq)
        enc_b1_a2 = enc_b1.addCallback(pow, a2.value, nsq)

        # Choose and encrypt r.
        r = rand.randint(0, 2 * field.modulus**2 + 2**k)
        enc_r = encrypt(r, self.peer.pubkey)

        c1 = gatherResults([enc_a1_b2, enc_b1_a2])
        c1.addCallback(lambda (a, b): a * b * enc_r)
        c1.addCallback(lambda c: send_data(pc, PAILLIER, str(c)))

        c2 = a2 * b2 - r
        return Share(self, field, c2)
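# Plain-integer sketch (no encryption, illustrative only) of the arithmetic
# behind finish_mul: P1 ends up with c1 = a1*b1 + (a1*b2 + b1*a2 + r), the
# parenthesised part arriving via the homomorphic ciphertext, while P2 keeps
# c2 = a2*b2 - r; the two local values are additive shares of the product of
# the two additively shared inputs.
def demo_two_party_product_shares(a1, b1, a2, b2, r, p):
    c1 = (a1 * b1 + a1 * b2 + b1 * a2 + r) % p   # what P1 ends up with
    c2 = (a2 * b2 - r) % p                       # what P2 keeps
    return c1, c2

p = 101
c1, c2 = demo_two_party_product_shares(3, 8, 5, 9, 7777, p)
assert (c1 + c2) % p == ((3 + 5) * (8 + 9)) % p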
def prss_shamir_share_bit_double(self, field):
    """Shamir share a random bit over *field* and GF256."""
    n = self.num_players
    k = self.options.security_parameter
    prfs = self.players[self.id].prfs(2**k)
    prss_key = self.prss_key()
    inputters = range(1, self.num_players + 1)

    ri = rand.randint(0, 2**k - 1)
    ri_p = self.shamir_share(inputters, field, ri)
    ri_lsb = self.shamir_share(inputters, GF256, ri & 1)

    r_p = reduce(self.add, ri_p)
    r_lsb = reduce(self.add, ri_lsb)

    b_p = self.prss_share_random(field, binary=True)
    b = self.open(b_p + r_p)
    # Extract the least significant bit and change the field to GF256.
    b.addCallback(lambda i: GF256(i.value & 1))
    b.field = GF256

    # Use r_lsb to flip b as needed.
    return (b_p, b ^ r_lsb)
    result.addErrback(self.error_handler)
    return result

# 1) Every party P_i chooses random values a_i, r_i in Z_p x (Z_p)^2,
#    computes alpha_i = Enc_eki(a_i) and A_i = Com_ck(a_i, r_i), and
#    broadcasts them.

# Every party P_i chooses random values a_i, r_i in Z_p x (Z_p)^2.
ai = random_number(field.modulus)
r1 = random_number(field.modulus)
r2 = random_number(field.modulus)

# Compute alpha_i = Enc_eki(a_i) ...
pubkey = self.players[self.id].pubkey
alpha_randomness = rand.randint(1, long(pubkey['n']))
alphai = encrypt_r(ai.value, alpha_randomness, pubkey)
# ... and A_i = Com_ck(a_i, r_i).
Ai = commitment.commit(ai.value, r1.value, r2.value)

# Choose random b_j, s_j in Z_p x (Z_p)^2.
bj = random_number(field.modulus)
s1 = random_number(field.modulus)
s2 = random_number(field.modulus)
# Compute B_j = Com_ck(b_j, s_j).
Bj = commitment.commit(bj.value, s1.value, s2.value)

# Broadcast alpha_i, A_i, B_j.
ds = self.broadcast(sorted(self.players.keys()),
                    sorted(self.players.keys()),
                    str(alphai) + ":" + repr(Ai) + ":" + repr(Bj))
def _mul(self, inx, jnx, n, ais=None, cjs=None):
    """Multiply each of the field elements in *ais* with the
    corresponding encrypted elements in *cjs*.

    Returns a deferred which will yield a list of field elements.
    """
    CKIND = 1

    # The transmission_restraint_constant is the number of encrypted
    # shares we can safely transmit in one call to sendData. The
    # sendData method can only transmit up to 65536 bytes. The
    # constant has been empirically determined by running
    # TripleGenerator.generate_triples.
    # TODO: How can we allow a user of the runtime to adjust this
    # constraint at a higher level of abstraction?
    transmission_restraint_constant = 425

    number_of_packets = n / transmission_restraint_constant
    if n % transmission_restraint_constant != 0:
        number_of_packets += 1

    self.runtime.increment_pc()
    pc = tuple(self.runtime.program_counter)

    deferred = []
    zis = []
    if self.runtime.id == inx:
        Nj_square = self.paillier.get_modulus_square(jnx)
        all_cs = []
        for iny, (ai, cj) in enumerate(zip(ais, cjs)):
            if iny % transmission_restraint_constant == 0:
                cs = []
                all_cs.append(cs)
            u = rand.randint(0, self.u_bound)
            Ej_u = self.paillier.encrypt(u, jnx)
            cs.append((fast_pow(cj, ai.value, Nj_square) * Ej_u) % Nj_square)
            zi = self.Zp(-u)
            zis.append(zi)

        for cs in all_cs:
            self.runtime.protocols[jnx].sendData(pc, CKIND, str(cs))

    if self.runtime.id == jnx:
        all_cs = []
        for _ in xrange(number_of_packets):
            cs = Deferred()
            self.runtime._expect_data(inx, CKIND, cs)
            all_cs.append(cs)

        def decrypt(all_cs, pc, zis):
            zjs = []
            cs = reduce(lambda x, y: x + eval(y), all_cs, [])
            for iny, c in enumerate(cs):
                t = self.paillier.decrypt(c)
                zj = self.Zp(t)
                zjs.append(zj)
            if not zis == []:
                return [x + y for x, y in zip(zis, zjs)]
            else:
                return zjs

        all_cs_d = gatherResults(all_cs)
        all_cs_d.addCallback(decrypt, pc, zis)
        deferred = all_cs_d
    else:
        zis_deferred = Deferred()
        zis_deferred.callback(zis)
        deferred = zis_deferred

    return deferred
def encrypt(m, pubkey):
    r = rand.randint(1, long(pubkey['n']))
    return encrypt_r(m, r, pubkey)
def random_number(p):
    return field(rand.randint(0, p - 1))