def __init__(self, public_keys, private_key, crs, n, t, my_id, send, recv, pc=None, field=ZR):  # noqa: E501
    """
    Initialize an AVSS participant.

    Stores the key material and network parameters, splits the raw
    send/recv channels into per-tag sub-channels, and sets up the
    polynomial-commitment scheme (a fresh preprocessed PolyCommitLin
    unless an existing one is supplied via `pc`).
    """
    self.public_keys = public_keys
    self.private_key = private_key
    self.n = n
    self.t = t
    self.my_id = my_id
    self.g = crs[0]

    # Demultiplex the single `recv` channel into per-tag channels.
    self.subscribe_recv_task, self.subscribe_recv = subscribe_recv(recv)

    # Wrap `send` so that every outgoing message carries its tag.
    self.get_send = lambda tag: wrap_send(tag, send)

    # Shares are pushed here the moment they are produced; consumers can
    # drain this queue while several AVSS instances run concurrently.
    self.output_queue = asyncio.Queue()

    self.field = field
    self.poly = polynomials_over(self.field)
    if pc is not None:
        self.poly_commit = pc
    else:
        self.poly_commit = PolyCommitLin(crs, field=self.field)
        self.poly_commit.preprocess(5)
def test_poly_commit():
    """Commit to a random polynomial and verify an evaluation witness."""
    pc = PolyCommitLin([G1.rand(), G1.rand()])
    deg = randint(10, 50)
    poly = polynomials_over(ZR).random(deg)
    commitment, aux = pc.commit(poly)
    point = randint(0, deg - 1)
    proof = pc.create_witness(aux, point)
    assert pc.verify_eval(commitment, point, poly(point), proof)
def test_benchmark_hbavss_lite_dealer(test_router, benchmark, t, k):
    """Benchmark the dealer side of a batched HbAVSS-light instance."""
    loop = asyncio.get_event_loop()
    field = ZR
    n = 3 * t
    g, h, pks, sks = get_avss_params(n, t)
    crs = [g, h]
    pc = PolyCommitLin(crs, field=field)
    pc.preprocess(8)
    values = [field.random() for _ in range(k)]
    params = (t, n, g, h, pks, sks, crs, pc, values, field)

    def _run():
        loop.run_until_complete(hbavss_light_batch_dealer(test_router, params))

    benchmark(_run)
def test_benchmark_create_witness(benchmark, t):
    """Benchmark witness creation for a random degree-t polynomial."""
    generators = [G1.rand(), G1.rand()]
    pc = PolyCommitLin(generators)
    aux_poly = polynomials_over(ZR).random(t)
    point = ZR.random()
    benchmark(pc.create_witness, aux_poly, point)
class HbAvssLight:
    """
    Lightweight asynchronous verifiable secret sharing (AVSS).

    The dealer commits to random degree-t polynomials (one per value),
    encrypts each party's shares and witnesses under a per-instance
    ephemeral Diffie-Hellman key, and reliably broadcasts the bundle.
    Recipients verify their shares against the polynomial commitments and
    run an OK/IMPLICATE/RECOVERY protocol to guarantee output delivery.
    """

    def __init__(self, public_keys, private_key, crs, n, t, my_id, send, recv, pc=None, field=ZR):  # noqa: E501
        """
        Store keys and parameters, split the send/recv channels by tag, and
        set up the polynomial-commitment scheme (fresh PolyCommitLin unless
        an existing one is passed via `pc`).
        """
        self.public_keys, self.private_key = public_keys, private_key
        self.n, self.t, self.my_id = n, t, my_id
        self.g = crs[0]
        # Create a mechanism to split the `recv` channels based on `tag`
        self.subscribe_recv_task, self.subscribe_recv = subscribe_recv(recv)

        # Create a mechanism to split the `send` channels based on `tag`
        def _send(tag):
            return wrap_send(tag, send)

        self.get_send = _send
        # This is added to consume the share the moment it is generated.
        # This is especially helpful when running multiple AVSSes in parallel.
        self.output_queue = asyncio.Queue()
        self.field = field
        self.poly = polynomials_over(self.field)
        if pc is None:
            self.poly_commit = PolyCommitLin(crs, field=self.field)
            self.poly_commit.preprocess(5)
        else:
            self.poly_commit = pc

    def __enter__(self):
        # Context-manager support: entering returns the instance itself.
        return self

    def __exit__(self, type, value, traceback):
        # Stop the background task that demultiplexes incoming messages.
        self.subscribe_recv_task.cancel()

    def _handle_implication(self, commitments, ephemeral_public_key, j, j_sk, j_z):
        """
        Handle the implication of AVSS.
        Return True if the implication is valid, False otherwise.

        An implication is valid when the accuser's revealed secret key
        matches its public key AND the accuser's blob either fails to
        decrypt or decrypts to shares that do not verify against the
        commitments (i.e. the dealer really misbehaved toward node j).
        """
        print("got implication")
        # discard if PKj != g^SKj
        if self.public_keys[j] != pow(self.g, j_sk):
            return False
        # decrypt and verify
        j_shared_key = pow(ephemeral_public_key, j_sk)
        try:
            j_shares, j_auxs = SymmetricCrypto.decrypt(
                str(j_shared_key).encode(), j_z)
        except Exception:  # TODO: specific exception
            # Failure to decrypt node j's blob itself proves the dealer
            # sent j garbage, so the implication stands.
            return True
        return not self.poly_commit.batch_verify_eval(commitments, j + 1, j_shares, j_auxs)

    async def _process_avss_msg(self, avss_id, dealer_id, avss_msg):
        """
        Process the reliably-broadcast dealer message: decrypt and verify
        our own shares, then run the OK/IMPLICATE/RECOVERY receive loop
        until output is delivered (via `output_queue`) and the protocol
        can terminate.
        """
        tag = f"{dealer_id}-{avss_id}-AVSS"
        send, recv = self.get_send(tag), self.subscribe_recv(tag)

        def multicast(msg):
            # Send `msg` to every node (including ourselves).
            for i in range(self.n):
                send(i, msg)

        commitments, ephemeral_public_key, encrypted_blobs = loads(avss_msg)
        # Shared DH key with the dealer: pk_eph ^ our_secret.
        shared_key = pow(ephemeral_public_key, self.private_key)
        share_valid = True
        try:
            shares, witnesses = SymmetricCrypto.decrypt(
                str(shared_key).encode(), encrypted_blobs[self.my_id])
            if self.poly_commit.batch_verify_eval(commitments, self.my_id + 1, shares, witnesses):
                logger.info(f"OK_timestamp: {time.time()}")
                multicast((HbAVSSMessageType.OK, ""))
            else:
                # Our shares don't match the commitments: accuse the dealer
                # by revealing our secret key so others can check.
                multicast((HbAVSSMessageType.IMPLICATE, self.private_key))
                share_valid = False
        except Exception:  # TODO specific exceptions
            # Decryption failure is treated the same as a bad share.
            multicast((HbAVSSMessageType.IMPLICATE, self.private_key))
            share_valid = False

        # RECEIVE LOOP
        ok_set = set()          # nodes that multicast OK
        recovery_set = set()    # nodes whose recovery shares we accepted
        implicate_set = set()   # nodes that multicast IMPLICATE
        recovery_shares = [[] for _ in range(len(commitments))]
        sent_recovery = False
        output = False
        recovered = False
        while True:
            # Deliver output once 2t+1 OKs arrive and our share is valid.
            if len(ok_set) == 2 * self.t + 1 and share_valid and not output:
                if len(commitments) == 1:
                    self.output_queue.put_nowait(
                        (dealer_id, avss_id, int(shares[0])))
                else:
                    int_shares = [int(shares[i]) for i in range(len(shares))]
                    self.output_queue.put_nowait(
                        (dealer_id, avss_id, int_shares))
                output = True
            # Alternatively, reconstruct our share from t+1 recovery shares.
            elif len(recovery_set) == self.t + 1 and not output:
                if len(commitments) == 1:
                    shares = [
                        self.poly.interpolate_at(recovery_shares[0], self.my_id + 1)
                    ]
                    self.output_queue.put_nowait(
                        (dealer_id, avss_id, int(shares[0])))
                else:
                    shares = [None] * len(commitments)
                    share_ints = [None] * len(commitments)
                    for i in range(len(commitments)):
                        shares[i] = self.poly.interpolate_at(
                            recovery_shares[i], self.my_id + 1)
                        share_ints[i] = int(shares[i])
                    self.output_queue.put_nowait(
                        (dealer_id, avss_id, share_ints))
                output = True
                share_valid = True
                recovered = True
                multicast((HbAVSSMessageType.OK, ""))

            # Conditions where we can terminate
            if (len(ok_set) == self.n
                    or len(implicate_set) >= self.t + 1
                    or len(ok_set) >= 2 * self.t + 1 and (sent_recovery or recovered)):
                break

            sender, avss_msg = await recv()  # First value is `sid` (not true anymore?)
            if avss_msg[0] == HbAVSSMessageType.OK and sender not in ok_set:
                ok_set.add(sender)
            if (avss_msg[0] == HbAVSSMessageType.IMPLICATE
                    and sender not in implicate_set):
                implicate_set.add(sender)
            if (avss_msg[0] == HbAVSSMessageType.IMPLICATE
                    and not sent_recovery and share_valid):
                j_sk = avss_msg[1]
                j = sender
                # validate the implicate
                if not self._handle_implication(commitments,
                                                ephemeral_public_key,
                                                j, j_sk,
                                                encrypted_blobs[j]):
                    # Count an invalid implicate as an okay
                    if sender not in ok_set:
                        ok_set.add(sender)
                    continue
                # Valid accusation: reveal our key so others can recover.
                sent_recovery = True
                multicast((HbAVSSMessageType.RECOVERY, self.private_key))
            if (avss_msg[0] == HbAVSSMessageType.RECOVERY
                    and not share_valid and sender not in recovery_set):
                try:
                    # Re-derive sender's shared key from its revealed secret
                    # and decrypt the sender's blob.
                    shares_j, auxs_j = SymmetricCrypto.decrypt(
                        str(ephemeral_public_key ** avss_msg[1]).encode(),
                        encrypted_blobs[sender],
                    )  # noqa: E501
                except Exception:
                    ok_set.add(sender)
                    continue
                if self.poly_commit.batch_verify_eval(commitments, sender + 1,
                                                      shares_j, auxs_j):
                    for i in range(len(commitments)):
                        recovery_shares[i].append([sender + 1, shares_j[i]])
                    recovery_set.add(sender)

    def _get_dealer_msg(self, value):
        """
        Build the dealer's broadcast payload for one value or a batch.

        For each value, sample a random degree-t polynomial with that value
        as the free coefficient and commit to it. Each node i's shares and
        witnesses are encrypted under a shared key derived from node i's
        public key and a fresh ephemeral secret. Returns the serialized
        (commitments, ephemeral public key, encrypted blobs) tuple.
        """
        if type(value) in (list, tuple):
            valuelist = value
        else:
            valuelist = [value]
        philist, commitlist, auxlist = [], [], []
        for val in valuelist:
            phi = self.poly.random(self.t, val)
            philist.append(phi)
            # Todo: precompute commit stuff
            commitment, aux_poly = self.poly_commit.commit(phi)
            commitlist.append(commitment)
            auxlist.append(aux_poly)
        ephemeral_secret_key = self.field.random()
        ephemeral_public_key = pow(self.g, ephemeral_secret_key)
        z = [None] * self.n
        for i in range(self.n):
            # DH shared key with node i; node i evaluates at point i+1.
            shared_key = pow(self.public_keys[i], ephemeral_secret_key)
            shares, witnesses = [], []
            for j in range(len(philist)):
                shares.append(philist[j](i + 1))
                witnesses.append(
                    self.poly_commit.create_witness(auxlist[j], i + 1))
            z[i] = SymmetricCrypto.encrypt(
                str(shared_key).encode(), (shares, witnesses))
        return dumps((commitlist, ephemeral_public_key, z))

    async def avss(self, avss_id, value=None, dealer_id=None, client_mode=False):
        """
        avss_id: This must be an integer. This must start from 0 per dealer.
        This is important since it used to ensure an in order delivery of
        values at each node per dealer i.e. if a node deals two values,
        then the shares of those values need to be received in the order
        that they are dealt.

        Eg:
        => If there are 4 nodes and node 0 wants to deal two values:

        node 0: avss(0, value=value1, dealer_id=0)
        node 1: avss(0, dealer_id=0)
        node 2: avss(0, dealer_id=0)
        node 3: avss(0, dealer_id=0)

        node 0: avss(1, value=value2, dealer_id=0)
        node 1: avss(1, dealer_id=0)
        node 2: avss(1, dealer_id=0)
        node 3: avss(1, dealer_id=0)

        => Now, if node 1 wants to deal a value next,
        => the avss_id still must start from 0:

        node 0: avss(0, value=value3, dealer_id=1)
        node 1: avss(0, dealer_id=1)
        node 2: avss(0, dealer_id=1)
        node 3: avss(0, dealer_id=1)
        """
        # If `value` is passed then the node is a 'Sender'
        # `dealer_id` must be equal to `self.my_id`
        if value is not None:
            if dealer_id is None:
                dealer_id = self.my_id
            assert dealer_id == self.my_id, "Only dealer can share a value."
        # If `value` is not passed then the node is a 'Recipient'
        # Verify that the `dealer_id` is not the same as `self.my_id`
        elif dealer_id is not None:
            assert dealer_id != self.my_id
        if client_mode:
            assert dealer_id is not None
            assert dealer_id == self.n
        assert type(avss_id) is int

        logger.debug(
            "[%d] Starting Light AVSS. Id: %s, Dealer Id: %d, Client Mode: %s",
            self.my_id,
            avss_id,
            dealer_id,
            client_mode,
        )

        # Only the dealer builds the payload; everyone else passes None.
        broadcast_msg = None if self.my_id != dealer_id else self._get_dealer_msg(
            value)
        # In the client_mode, the dealer is the last node
        n = self.n if not client_mode else self.n + 1
        tag = f"{dealer_id}-{avss_id}-RBC"
        send, recv = self.get_send(tag), self.subscribe_recv(tag)
        avss_msg = await reliablebroadcast(
            tag,
            self.my_id,
            n,
            self.t,
            dealer_id,
            broadcast_msg,
            recv,
            send,
            client_mode=client_mode,
        )
        if client_mode and self.my_id == dealer_id:
            # In client_mode, the dealer is not supposed to do
            # anything after sending the initial value.
            return
        logger.debug("[%d] RBC completed.", self.my_id)
        await self._process_avss_msg(avss_id, dealer_id, avss_msg)
        logger.debug("[%d] AVSS [%s] completed.", self.my_id, avss_id)

    async def avss_parallel(self, avss_id, k, values=None, dealer_id=None):
        """
        Run a HbAVSSLight in parallel for each of the values.

        avss_id: This must be an integer. This must start from 0 per dealer.
        Look at the `avss` method above for a detailed explanation.
        """
        if values is not None:
            assert len(values) == k
        avss_tasks = [None] * k
        for i in range(k):
            v = None if values is None else values[i]
            # Derived ids `k*avss_id + i` keep parallel runs per dealer unique.
            avss_tasks[i] = asyncio.create_task(
                self.avss(k * avss_id + i, v, dealer_id))
        return await asyncio.gather(*avss_tasks)
def test_benchmark_commit(benchmark, t):
    """Benchmark committing to a random degree-t polynomial."""
    generators = [G1.rand(), G1.rand()]
    pc = PolyCommitLin(generators)
    poly = polynomials_over(ZR).random(t)
    benchmark(pc.commit, poly)