async def run_common_subset_in_processes(config, pbk, pvk, n, f, nodeid):
    """Run one ACS (asynchronous common subset) instance inside a
    ProcessProgramRunner, then log the agreed output and elapsed time."""
    sid = "sidA"

    async with ProcessProgramRunner(config, n, f, nodeid) as program_runner:
        send, recv = program_runner.get_send_recv(sid)

        def bcast(msg):
            # Deliver the same message to all n nodes (self included).
            for dest in range(n):
                send(dest, msg)

        input_q = asyncio.Queue(1)

        # Build the common-subset instance concurrently so our input can be
        # queued while the protocol machinery is still being set up.
        acs_setup = asyncio.create_task(
            make_commonsubset(
                sid, nodeid, n, f, pbk, pvk, input_q.get, send, recv, bcast
            )
        )

        start_time = time.time()
        await input_q.put("<[ACS Input %d]>" % nodeid)
        acs, recv_tasks, work_tasks = await acs_setup
        acs_output = await acs
        await asyncio.gather(*work_tasks)
        # The receive loops run forever; stop them once ACS has decided.
        for recv_task in recv_tasks:
            recv_task.cancel()

        logging.info(f"OUTPUT: {acs_output}")
        logging.info(f"Elapsed time: {time.time() - start_time}")
# Example #2
async def _run(peers, n, t, my_id, batch_size):
    """Benchmark one HbAvssBatch sharing round.

    The node with id ``n`` acts as the dealer for ``batch_size`` random
    values; every other node (ids 0..n-1) acts as a recipient and waits
    for its first output share.  Per-role timings are logged.
    """
    # n + 1 parties in total: n recipients plus one dedicated dealer.
    g, h, pks, sks = get_avss_params(n + 1, t)
    async with ProcessProgramRunner(peers, n + 1, t, my_id) as runner:
        send, recv = runner.get_send_recv("HBAVSS_BATCH")
        crs = gen_pc_const_crs(t, g=g, h=h)
        values = None
        dealer_id = n
        if my_id == dealer_id:
            # Dealer: only the dealer supplies values; recipients pass None.
            values = [ZR.random(0)] * batch_size
            logger.info("Starting DEALER")
            logger.info(f"Dealer timestamp: {time.time()}")
        else:
            logger.info("Starting RECIPIENT: %d", my_id)

        with HbAvssBatch(pks, sks[my_id], crs, n, t, my_id, send,
                         recv) as hbavss:
            begin_time = time.time()
            if my_id != dealer_id:
                # Recipient: run avss as a background task and cancel it as
                # soon as the first output share arrives — only time-to-first-
                # output is measured.
                hbavss_task = asyncio.create_task(
                    hbavss.avss(0,
                                dealer_id=dealer_id,
                                values=values,
                                client_mode=True))
                await hbavss.output_queue.get()
                end_time = time.time()
                logger.info(f"Recipient time: {(end_time - begin_time)}")
                hbavss_task.cancel()
            else:
                # Dealer: run the sharing to completion and time the whole run.
                await hbavss.avss(0,
                                  dealer_id=dealer_id,
                                  values=values,
                                  client_mode=True)
                end_time = time.time()
                logger.info(f"Dealer time: {(end_time - begin_time)}")
# Example #3
async def _run(peers, n, t, my_id):
    """Execute ``prog`` under a ProcessProgramRunner and report the total
    number of bytes this node sent."""
    from honeybadgermpc.ipc import ProcessProgramRunner

    async with ProcessProgramRunner(peers, n, t, my_id, mpc_config) as runner:
        await runner.execute("0", prog)
        sent = runner.node_communicator.bytes_sent
        print(f"[{my_id}] Total bytes sent out: {sent}")
# Example #4
async def _run(peers, n, t, k, my_id):
    """Run randousha to generate k random sharings and log the elapsed time."""
    field = GF(Subgroup.BLS12_381)
    async with ProcessProgramRunner(peers, n, t, my_id) as runner:
        send, recv = runner.get_send_recv("0")
        started = time.time()
        await randousha(n, t, k, my_id, send, recv, field)
        finished = time.time()
        logging.info("[%d] Finished in %s", my_id, finished - started)
async def _run(peers, n, t, my_id):
    """Run the butterfly-network shuffle program over MPC.

    Fix: ``runner.execute`` is a coroutine; the original body never awaited
    it, so the program never actually ran (at best a "coroutine was never
    awaited" warning).  Every sibling example in this file awaits it.

    NOTE(review): ``k`` is read from an enclosing/global scope here rather
    than being a parameter — confirm the caller defines it.
    """
    from honeybadgermpc.ipc import ProcessProgramRunner
    from honeybadgermpc.progs.mixins.share_arithmetic import (
        MixinConstants,
        BeaverMultiplyArrays,
    )

    # Enable array-wise Beaver multiplication for the butterfly network.
    mpc_config = {MixinConstants.MultiplyShareArray: BeaverMultiplyArrays()}
    async with ProcessProgramRunner(peers, n, t, my_id, mpc_config) as runner:
        await runner.execute("0", butterfly_network_helper, k=k)
async def run_common_coin(config, pbk, pvk, n, f, nodeid):
    """Instantiate a threshold shared coin and log ten successive flips."""
    async with ProcessProgramRunner(config, n, f, nodeid) as program_runner:
        send, recv = program_runner.get_send_recv("coin")

        def broadcast(msg):
            # Send the message to every node, self included.
            for dest in range(n):
                send(dest, msg)

        coin, crecv_task = await shared_coin(
            "sidA", nodeid, n, f, pbk, pvk, broadcast, recv
        )
        for round_no in range(10):
            logger.info(
                "[%d] %d COIN VALUE: %s", nodeid, round_no, await coin(round_no)
            )
        # Stop the coin's receive loop once we are done flipping.
        crecv_task.cancel()
async def run_binary_agreement(config, pbk, pvk, n, f, nodeid):
    """Run one binary-agreement instance on top of a shared coin and log
    the value this node agreed on."""
    from honeybadgermpc.broadcast.commoncoin import shared_coin
    import random

    sid_c = "sid_coin"
    sid_ba = "sid_ba"

    async with ProcessProgramRunner(config, n, f, nodeid) as program_runner:
        # Separate channel for the coin protocol.
        send_c, recv_c = program_runner.get_send_recv(sid_c)

        def bcast_c(msg):
            for dest in range(n):
                send_c(dest, msg)

        coin, crecv_task = await shared_coin(
            sid_c, nodeid, n, f, pbk, pvk, bcast_c, recv_c
        )

        inputq = asyncio.Queue()
        outputq = asyncio.Queue()

        # Separate channel for the BA protocol itself.
        send_ba, recv_ba = program_runner.get_send_recv(sid_ba)

        def bcast_ba(msg):
            for dest in range(n):
                send_ba(dest, msg)

        ba_task = binaryagreement(
            sid_ba,
            nodeid,
            n,
            f,
            coin,
            inputq.get,
            outputq.put_nowait,
            bcast_ba,
            recv_ba,
        )

        # Propose a random bit as this node's BA input.
        inputq.put_nowait(random.randint(0, 1))

        await ba_task

        logger.info("[%d] BA VALUE: %s", nodeid, await outputq.get())
        # BA is done; stop the coin's background receive loop.
        crecv_task.cancel()
# Example #8
async def async_mixing_in_processes(network_info, n, t, k, run_id, node_id):
    """Run the three-phase asynchronous mixing pipeline and return the result.

    Phase 1 shares all ``k`` secrets (one unique file prefix per secret),
    phase 2 runs the per-secret C++ step through a bounded task pool, and
    phase 3 opens the resulting power sums, which are then fed to the
    Newton equation solver.
    """
    from .solver.solver import solve
    from honeybadgermpc.ipc import ProcessProgramRunner
    from honeybadgermpc.utils.task_pool import TaskPool

    # One unique file prefix per secret so per-secret phase-2 jobs don't
    # collide on disk.
    file_prefixes = [uuid.uuid4().hex for _ in range(k)]
    async with ProcessProgramRunner(network_info, n, t, node_id) as runner:
        await runner.execute("0",
                             all_secrets_phase1,
                             k=k,
                             file_prefixes=file_prefixes)
        logging.info("Phase 1 completed.")

        # Cap phase-2 concurrency at 256 simultaneous jobs.
        pool = TaskPool(256)
        # NOTE(review): `time()` is called directly (unlike `time.time()`
        # elsewhere in these examples) — presumably `from time import time`;
        # confirm against this module's imports.
        stime = time()
        for i in range(k):
            pool.submit(phase2(node_id, run_id, file_prefixes[i]))
        await pool.close()

        # NOTE(review): uses the global HbmpcConfig.my_id rather than the
        # node_id parameter — confirm the two always agree.
        bench_logger = logging.LoggerAdapter(
            logging.getLogger("benchmark_logger"),
            {"node_id": HbmpcConfig.my_id})

        bench_logger.info(
            f"[Phase2] Execute CPP code for all secrets: {time() - stime}")
        logging.info("Phase 2 completed.")

        # Phase 3: open the power sums computed by the C++ step.
        power_sums = await runner.execute("1", phase3, k=k, run_id=run_id)

        logging.info("Shares from C++ phase opened.")
        stime = time()
        result = solve([s.value for s in power_sums])
        bench_logger.info(f"[SolverPhase] Run Newton Solver: {time() - stime}")
        logging.info("Equation solver completed.")
        logging.debug(result)
        return result
# Example #9
async def _prog(peers, n, t, my_id):
    """Generate random shares and run ``_mpc_prog`` over them.

    Fix: ``runner.execute`` is a coroutine; the original never awaited it,
    so the MPC program never actually ran.  Every sibling example in this
    file awaits it.

    NOTE(review): the sids here are ints (0, 1) while the other examples use
    strings ("0", "1") — confirm get_send_recv/execute accept both.
    """
    async with ProcessProgramRunner(peers, n, t, my_id) as runner:
        send, recv = runner.get_send_recv(0)
        await runner.execute(
            1, _mpc_prog, randoms=get_random(n, t, my_id, send, recv)
        )