Example #1
def local_main():
    current_dir = os.path.dirname(os.path.realpath(__file__))
    data_path = os.path.join(current_dir, "data")
    for pid in ["1", "2"]:
        # define name for the workflow
        workflow_name = "aspirin-local-test-" + pid
        # configure conclave
        conclave_config = CodeGenConfig(workflow_name, int(pid))
        conclave_config.all_pids = [int(pid)]
        sharemind_conf = SharemindCodeGenConfig("/mnt/shared", use_docker=False, use_hdfs=False)
        conclave_config.with_sharemind_config(sharemind_conf)
        # point conclave to the directory where the generated code should be stored/ read from
        conclave_config.code_path = os.path.join("/mnt/shared", workflow_name)
        # point conclave to directory where data is to be read from...
        conclave_config.input_path = data_path
        # and written to
        conclave_config.output_path = data_path
        suffix = "left" if pid == "1" else "right"
        # generate and dispatch this party's job
        job_queue = generate_code(lambda: protocol_local(suffix, int(pid)), conclave_config, ["sharemind"], ["python"],
                                  apply_optimizations=False)
        dispatch_jobs(job_queue, conclave_config)

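    # combine the MPC result with each party's local partial sum into the final output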
    res_mpc = read_rel(os.path.join(data_path, "actual_mpc_open.csv"))
    res_left = read_rel(os.path.join(data_path, "actual_left.csv"))
    res_right = read_rel(os.path.join(data_path, "actual_right.csv"))
    assert len(res_mpc) == 1
    assert len(res_left) == 1
    assert len(res_right) == 1
    res = [[res_mpc[0][0] + res_left[0][0] + res_right[0][0]]]
    write_rel(data_path, "actual_open.csv", res, "1")
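These excerpts omit their import blocks. Judging by the layout of the multiparty/conclave repository, they would typically begin along these lines (treat the exact module paths as an assumption, not part of the original excerpts):

import os
import sys

import conclave.lang as cc
from conclave import generate_code, dispatch_jobs, generate_and_dispatch
from conclave.config import CodeGenConfig, SharemindCodeGenConfig, SparkConfig, NetworkConfig, OblivcConfig
from conclave.utils import defCol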
Example #2
def setup(conf: Dict):
    pid = conf["pid"]
    hdfs_node_name = conf["spark"]["hdfs"]["node_name"]
    hdfs_root = conf["spark"]["hdfs"]["root"]
    spark_master_url = conf["spark"]["master_url"]

    workflow_name = conf["workflow_name"]

    sm_config = SharemindCodeGenConfig(conf["code_path"])
    spark_config = SparkConfig(spark_master_url)

    conclave_config = CodeGenConfig(workflow_name) \
        .with_sharemind_config(sm_config) \
        .with_spark_config(spark_config)

    # os.path.join avoids depending on a trailing slash in conf["code_path"]
    conclave_config.code_path = os.path.join(conf["code_path"], workflow_name)
    conclave_config.input_path = "hdfs://{}/{}/{}".format(
        hdfs_node_name, hdfs_root, conf["name"])
    conclave_config.output_path = "hdfs://{}/{}/{}".format(
        hdfs_node_name, hdfs_root, conf["name"])
    conclave_config.pid = pid
    conclave_config.name = workflow_name
    conclave_config.all_pids = [int(p) for p in conf["all_pids"]]

    network_config = NetworkConfig(conf["sharemind"]["parties"], pid)

    conclave_config.with_network_config(network_config)

    return conclave_config
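For reference, a minimal sketch of the conf dict this setup function expects, inferred from the keys it reads (all values are illustrative placeholders):

conf = {
    "pid": 1,
    "all_pids": [1, 2, 3],
    "workflow_name": "demo-workflow",
    "name": "demo-data",
    "code_path": "/mnt/shared/",
    "spark": {
        "master_url": "spark://spark-master:7077",
        "hdfs": {
            "node_name": "namenode:9000",
            "root": "user/conclave"
        }
    },
    "sharemind": {
        "parties": [
            {"host": "node-0", "port": 9001},
            {"host": "node-1", "port": 9002},
            {"host": "node-2", "port": 9003}
        ]
    }
}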
Example #3
def main():
    pid = sys.argv[1]
    data_root = sys.argv[2]
    backend = sys.argv[3]
    workflow_name = "hhi-benchmark-" + pid
    if backend == "python":
        sharemind_conf = SharemindCodeGenConfig("/mnt/shared",
                                                use_docker=True,
                                                use_hdfs=False)
        conclave_config = CodeGenConfig(workflow_name, int(pid))
        conclave_config.with_sharemind_config(sharemind_conf)
        conclave_config.code_path = os.path.join("/mnt/shared", workflow_name)
        conclave_config.input_path = os.path.join("/mnt/shared", data_root)
        conclave_config.output_path = os.path.join("/mnt/shared", data_root)
        generate_and_dispatch(protocol,
                              conclave_config, ["sharemind"], ["python"],
                              apply_optimizations=True)
    elif backend == "spark":
        sharemind_conf = SharemindCodeGenConfig("/mnt/shared",
                                                use_docker=True,
                                                use_hdfs=True)
        conclave_config = CodeGenConfig(workflow_name, int(pid))

        host = conclave_config.network_config["parties"][int(pid)]["host"]
        # Update this if your spark master and HDFS namenode are mapped to a different host than your Conclave node
        spark_master_url = "spark://{}:7077".format(host)
        hdfs_namenode = "{}:9000".format(host)
        spark_config = SparkConfig(spark_master_url)

        conclave_config \
            .with_sharemind_config(sharemind_conf) \
            .with_spark_config(spark_config)

        conclave_config.code_path = os.path.join("/mnt/shared", workflow_name)
        conclave_config.input_path = "hdfs://{}/{}".format(
            hdfs_namenode, data_root)
        conclave_config.output_path = "hdfs://{}/{}".format(
            hdfs_namenode, data_root)
        generate_and_dispatch(protocol,
                              conclave_config, ["sharemind"], ["spark"],
                              apply_optimizations=True)
    else:
        raise Exception("Unknown backend {}".format(backend))
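The script expects three positional arguments: the party ID, the data directory, and the backend ("python" or "spark"). Note that it calls generate_and_dispatch, which, as the name suggests, appears to fold the generate_code / dispatch_jobs pair used in the other examples into a single call.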
Example #4
    def check_workflow(self, dag, name):
        expected_rootdir = \
            "{}/sharemind_expected".format(os.path.dirname(os.path.realpath(__file__)))

        sm_cfg = SharemindCodeGenConfig()
        cfg = CodeGenConfig('cfg').with_sharemind_config(sm_cfg)
        cg = SharemindCodeGen(cfg, dag, 1)

        # generate code for party 1 and keep the generated 'miner' source
        actual = cg._generate('code', '/tmp')[1]['miner']

        with open(os.path.join(expected_rootdir, name), 'r') as f:
            expected = f.read()

        self.assertEqual(expected, actual)
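A sketch of how such a helper is typically driven from a test method. The @dag_only decorator (which builds the workflow DAG without compiling or running it) and the protocol body are assumptions based on Conclave's test suite, not part of this excerpt:

    def test_agg(self):
        @dag_only
        def protocol():
            # single input relation owned by party 1 (hypothetical)
            cols = [defCol("a", "INTEGER", [1]), defCol("b", "INTEGER", [1])]
            in1 = cc.create("in1", cols, {1})
            agg = cc.aggregate(in1, "agg", ["a"], "b", "sum", "total")
            cc.collect(agg, 1)
            return {in1}

        self.check_workflow(protocol(), "agg")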
Example #5
def run_local(pid: str, data_root: str):
    workflow_name = "aspirin-local-join-" + pid + "-" + data_root
    conclave_config = CodeGenConfig(workflow_name, int(pid))
    conclave_config.all_pids = [int(pid)]
    sharemind_conf = SharemindCodeGenConfig("/mnt/shared",
                                            use_docker=False,
                                            use_hdfs=False)
    conclave_config.with_sharemind_config(sharemind_conf)
    conclave_config.code_path = os.path.join("/mnt/shared", workflow_name)
    conclave_config.input_path = os.path.join("/mnt/shared", data_root)
    conclave_config.output_path = os.path.join("/mnt/shared", data_root)
    suffix = "left" if pid == "1" else "right"

    job_queue = generate_code(lambda: protocol_local(suffix, int(pid)),
                              conclave_config, ["sharemind"], ["python"],
                              apply_optimizations=False)
    dispatch_jobs(job_queue, conclave_config)
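run_local is essentially the per-party body of the loop in Example #1, factored out into a standalone function: the caller supplies the party ID and the data directory, and the function configures and dispatches a single-party Sharemind job.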
Example #6
    def check_workflow(self, dag, name, use_leaky_ops=True):
        self.maxDiff = None
        expected_rootdir = \
            "{}/sharemind_expected".format(os.path.dirname(os.path.realpath(__file__)))

        sm_cfg = SharemindCodeGenConfig()
        cfg = CodeGenConfig('cfg').with_sharemind_config(sm_cfg)
        cfg.use_leaky_ops = use_leaky_ops
        cg = SharemindCodeGen(cfg, dag, 1)

        actual = cg._generate('code', '/tmp')[1]['miner']

        with open(os.path.join(expected_rootdir, name), 'r') as f_specific, \
                open(os.path.join(expected_rootdir, "base"), 'r') as f_base:
            expected_base = f_base.read()
            expected_specific = f_specific.read()
            expected = expected_base + expected_specific

        self.assertEqual(expected, actual)
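This variant of check_workflow differs from the one in Example #4 in two ways: it exposes the use_leaky_ops flag on the config, and it assembles the expected output by concatenating a shared "base" file with a test-specific file, so boilerplate common to all generated programs is stored only once.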
Example #7
def main():
    pid = sys.argv[1]
    data_root = sys.argv[2]
    mpc_backend = sys.argv[3]

    # define name for the workflow
    workflow_name = "aspirin-large-join-" + pid + "-" + data_root
    # configure conclave
    conclave_config = CodeGenConfig(workflow_name, int(pid))
    if mpc_backend == "sharemind":
        sharemind_conf = SharemindCodeGenConfig("/mnt/shared",
                                                use_docker=True,
                                                use_hdfs=False)
        conclave_config.with_sharemind_config(sharemind_conf)
    elif mpc_backend == "obliv-c":
        conclave_config.all_pids = [1, 2]
        net_conf = [
            {"host": "ca-spark-node-0", "port": 8001},
            {"host": "cb-spark-node-0", "port": 8002},
        ]
        net = NetworkConfig(net_conf, int(pid))
        conclave_config.with_network_config(net)

        oc_conf = OblivcConfig("/obliv-c/bin/oblivcc", "ca-spark-node-0:9000")
        conclave_config.with_oc_config(oc_conf)
    else:
        raise Exception("Unknown MPC backend {}".format(mpc_backend))

    conclave_config.code_path = os.path.join("/mnt/shared", workflow_name)
    conclave_config.input_path = os.path.join("/mnt/shared", data_root)
    conclave_config.output_path = os.path.join("/mnt/shared", data_root)

    job_queue = generate_code(protocol,
                              conclave_config, [mpc_backend], ["python"],
                              apply_optimizations=True)
    dispatch_jobs(job_queue, conclave_config)
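Obliv-C is a two-party framework, which is why the obliv-c branch pins all_pids to [1, 2] and supplies an explicit two-node network configuration, while the sharemind branch relies on the default network settings carried by CodeGenConfig.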
Example #8
def protocol():
    # NOTE: this excerpt originally began mid-function; the definitions below
    # are a plausible reconstruction, inferred from the column names used in
    # the join and aggregate calls, not part of the original source.
    left_cols = [defCol("a", "INTEGER", [1]), defCol("b", "INTEGER", [1])]
    right_cols = [defCol("c", "INTEGER", [1]), defCol("d", "INTEGER", [1])]
    left = cc.create("left", left_cols, {1})
    right = cc.create("right", right_cols, {1})

    # join on left.a == right.c, then sum d grouped by b
    joined = cc.join(left, right, "joined", ["a"], ["c"])
    cc.aggregate(joined, "expected", ["b"], "d", "sum", "total")

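    # Conclave protocols return the set of root (input) relations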
    return {left, right}


if __name__ == "__main__":
    pid = sys.argv[1]
    # define name for the workflow
    workflow_name = "simple-oblivious-test-" + pid
    # configure conclave
    conclave_config = CodeGenConfig(workflow_name, int(pid))
    conclave_config.all_pids = [1]  # in this demo there is only one party
    sharemind_conf = SharemindCodeGenConfig("/mnt/shared",
                                            use_docker=False,
                                            use_hdfs=False)
    conclave_config.with_sharemind_config(sharemind_conf)
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # point conclave to the directory where the generated code should be stored/ read from
    conclave_config.code_path = os.path.join("/mnt/shared", workflow_name)
    # point conclave to directory where data is to be read from...
    conclave_config.input_path = os.path.join(current_dir, "data")
    # and written to
    conclave_config.output_path = os.path.join(current_dir, "data")
    # generate code for the workflow and dispatch the job
    job_queue = generate_code(protocol,
                              conclave_config, ["sharemind"], ["python"],
                              apply_optimizations=False)
    dispatch_jobs(job_queue, conclave_config)
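Like the other demos, this script takes the party ID as its only command-line argument; with all_pids = [1], the workflow runs as a single-party job that reads and writes CSV files in the local data directory.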