async def _mixing_initiate_loop(self):
    """Task 4: repeatedly initiate mixing epochs on the coordinator contract.

    Polls the contract until at least ``K`` client inputs are queued and at
    least one mix's worth of preprocessing is available, then submits an
    ``initiate_mix`` transaction and logs the resulting event (if any).
    Runs forever; intended to be scheduled as a long-lived asyncio task.
    """
    # Task 4. Initiate mixing epochs
    contract_concise = ConciseContract(self.contract)
    # K = inputs consumed per mixing epoch (contract constant).
    K = contract_concise.K()  # noqa: N806
    while True:
        # Step 4.a. Wait until there are k values then call initiate_mix
        while True:
            inputs_ready = contract_concise.inputs_ready()
            mixes_avail = contract_concise.mixes_available()
            if inputs_ready >= K and mixes_avail >= 1:
                break
            await asyncio.sleep(5)  # poll interval while preconditions unmet
        # Step 4.b. Call initiate mix
        # NOTE(review): always sent from accounts[0], not accounts[self.myid]
        # as in the output-proposal path — presumably any account may call
        # initiate_mix; confirm against the contract.
        tx_hash = self.contract.functions.initiate_mix().transact(
            {"from": self.w3.eth.accounts[0]}
        )
        tx_receipt = await wait_for_receipt(self.w3, tx_hash)
        # An emitted MixingEpochInitiated event means our transaction won;
        # an empty log list means the call reverted / was redundant (another
        # party likely initiated the same epoch first).
        rich_logs = self.contract.events.MixingEpochInitiated().processReceipt(
            tx_receipt
        )
        if rich_logs:
            epoch = rich_logs[0]["args"]["epoch"]
            logging.info(f"[{self.myid}] Mixing epoch initiated: {epoch}")
        else:
            logging.info(f"[{self.myid}] initiate_mix failed (redundant?)")
        # Back off before checking whether another epoch can be initiated.
        await asyncio.sleep(10)
async def _offline_inputmasks_loop(self):
    """Task 1b: keep a buffer of input masks topped up via Randousha.

    Waits until the contract reports fewer than ``10 * K`` unclaimed input
    masks, then runs one round of the ``randousha`` protocol with the other
    ``n`` servers to generate a fresh batch, appends the degree-t shares to
    ``self._inputmasks``, and reports the new count to the contract.
    Runs forever; intended to be scheduled as a long-lived asyncio task.
    """
    contract_concise = ConciseContract(self.contract)
    # Protocol parameters from the contract: n servers, fault tolerance t,
    # and K inputs per mixing epoch.
    n = contract_concise.n()
    t = contract_concise.t()
    K = contract_concise.K()  # noqa: N806
    # Round counter used only to namespace the send/recv channel per batch.
    preproc_round = 0
    # Each randousha invocation yields k * (n - 2t) masks, i.e. K per round.
    k = K // (n - 2 * t)  # batch size
    while True:
        # Step 1b. I) Wait until needed
        # This inner loop always runs at least once, so totalmasks,
        # inputmasks_available and target are guaranteed to be bound for
        # the log statement below.
        while True:
            inputmasks_available = contract_concise.inputmasks_available()
            # preprocess() packs several counters; index 2 is the total
            # number of input masks ever generated.
            totalmasks = contract_concise.preprocess()[2]
            # Policy: try to maintain a buffer of 10 * K input masks
            target = 10 * K
            if inputmasks_available < target:
                break
            # already have enough input masks, sleep
            await asyncio.sleep(5)
        # Step 1b. II) Run Randousha
        logging.info(f"[{self.myid}] totalmasks: {totalmasks} \
            inputmasks available: {inputmasks_available} \
            target: {target} Initiating Randousha {k * (n - 2*t)}")
        send, recv = self.get_send_recv(
            f"preproc:inputmasks:{preproc_round}")
        start_time = time.time()
        # randousha returns per-mask (degree-t, degree-2t) share pairs;
        # unzip them into two parallel tuples.
        rs_t, rs_2t = zip(
            *await randousha(n, t, k, self.myid, send, recv, field))
        assert len(rs_t) == len(rs_2t) == k * (n - 2 * t)
        # Note: here we just discard the rs_2t
        # In principle both sides of randousha could be used with
        # a small modification to randousha
        end_time = time.time()
        logging.info(
            f"[{self.myid}] Randousha finished in {end_time-start_time}")
        # Extend the local pool; indices into this list are what the
        # contract hands out to clients as inputmask ids.
        self._inputmasks += rs_t
        # Step 1b. III) Submit an updated report
        await self._preprocess_report()
        # Increment the preprocessing round and continue
        preproc_round += 1
async def _mixing_loop(self):
    """Task 3: participate in each mixing epoch as it is initiated.

    For every epoch announced by the contract: unmask this epoch's K queued
    client inputs using the locally held input masks, stage the epoch's
    slice of Beaver triples and bits as MPC preprocessing, run the iterated
    butterfly permutation network under MPC to shuffle-and-open the
    messages, and propose the comma-joined plaintext output back to the
    contract. Runs forever; intended to be a long-lived asyncio task.
    """
    # Task 3. Participating in mixing epochs
    contract_concise = ConciseContract(self.contract)
    pp_elements = PreProcessedElements()
    # Protocol parameters (constants for the lifetime of the contract).
    n = contract_concise.n()
    t = contract_concise.t()
    K = contract_concise.K()  # noqa: N806
    PER_MIX_TRIPLES = contract_concise.PER_MIX_TRIPLES()  # noqa: N806
    PER_MIX_BITS = contract_concise.PER_MIX_BITS()  # noqa: N806
    epoch = 0
    while True:
        # 3.a. Wait for the next mix to be initiated
        while True:
            epochs_initiated = contract_concise.epochs_initiated()
            if epochs_initiated > epoch:
                break
            await asyncio.sleep(5)
        # 3.b. Collect the inputs
        # Epoch e owns input-queue slots [e*K, (e+1)*K).
        inputs = []
        for idx in range(epoch * K, (epoch + 1) * K):
            # Get the public input
            masked_input, inputmask_idx = contract_concise.input_queue(idx)
            # Contract stores the masked message as big-endian bytes.
            masked_input = field(int.from_bytes(masked_input, "big"))
            # Get the input masks
            # Subtracting our share of the mask yields our share of the
            # client's plaintext message.
            inputmask = self._inputmasks[inputmask_idx]
            m_share = masked_input - inputmask
            inputs.append(m_share)
        # 3.c. Collect the preprocessing
        # Consume this epoch's fixed-size slice of the shared pools
        # (populated elsewhere; assumed already long enough — TODO confirm
        # the offline tasks stay ahead of the mixing task).
        triples = self._triples[
            (epoch + 0) * PER_MIX_TRIPLES : (epoch + 1) * PER_MIX_TRIPLES
        ]
        bits = self._bits[(epoch + 0) * PER_MIX_BITS : (epoch + 1) * PER_MIX_BITS]
        # Hack explanation... the relevant mixins are in triples
        # Evict any cached preprocessing for (myid, n, t) so the files we
        # write below in prog() are re-read instead of stale cache entries.
        key = (self.myid, n, t)
        for mixin in (pp_elements._triples, pp_elements._one_minus_ones):
            if key in mixin.cache:
                del mixin.cache[key]
                del mixin.count[key]
        # 3.d. Call the MPC program
        async def prog(ctx):
            # Write this epoch's triples/bits into the preprocessing files
            # that the MPC runtime reads its elements from.
            pp_elements._init_data_dir()
            # Overwrite triples and one_minus_ones
            for kind, elems in zip(("triples", "one_minus_one"), (triples, bits)):
                if kind == "triples":
                    # Triples arrive as (a, b, ab) groups; flatten to a
                    # single stream of shares for the file format.
                    elems = flatten_lists(elems)
                elems = [e.value for e in elems]
                mixin = pp_elements.mixins[kind]
                mixin_filename = mixin.build_filename(ctx.N, ctx.t, ctx.myid)
                mixin._write_preprocessing_file(
                    mixin_filename, ctx.t, ctx.myid, elems, append=False
                )
            pp_elements._init_mixins()
            logging.info(f"[{ctx.myid}] Running permutation network")
            inps = list(map(ctx.Share, inputs))
            assert len(inps) == K
            # Obliviously shuffle the K message shares, then open the
            # shuffled result so nobody learns the permutation.
            shuffled = await iterated_butterfly_network(ctx, inps, K)
            shuffled_shares = ctx.ShareArray(list(map(ctx.Share, shuffled)))
            opened_values = await shuffled_shares.open()
            # Decode each opened field element back into the 32-byte,
            # NUL-padded message the client originally encoded.
            msgs = [
                m.value.to_bytes(32, "big").decode().strip("\x00")
                for m in opened_values
            ]
            return msgs

        send, recv = self.get_send_recv(f"mpc:{epoch}")
        logging.info(f"[{self.myid}] MPC initiated:{epoch}")
        # Config just has to specify mixins used by switching_network
        config = {MixinConstants.MultiplyShareArray: BeaverMultiplyArrays()}
        ctx = Mpc(f"mpc:{epoch}", n, t, self.myid, send, recv, prog, config)
        result = await ctx._run()
        logging.info(f"[{self.myid}] MPC complete {result}")
        # 3.e. Output the published messages to contract
        result = ",".join(result)
        tx_hash = self.contract.functions.propose_output(epoch, result).transact(
            {"from": self.w3.eth.accounts[self.myid]}
        )
        tx_receipt = await wait_for_receipt(self.w3, tx_hash)
        # A MixOutput event fires only for the accepted proposal; absence
        # means another server's (identical) output was accepted first.
        rich_logs = self.contract.events.MixOutput().processReceipt(tx_receipt)
        if rich_logs:
            epoch = rich_logs[0]["args"]["epoch"]
            output = rich_logs[0]["args"]["output"]
            logging.info(f"[{self.myid}] MIX OUTPUT[{epoch}] {output}")
        else:
            pass
        epoch += 1
        pass