Example #1
# Imports assumed to come from Conclave's conclave.config module; adjust if the layout differs
from typing import Dict

from conclave.config import (CodeGenConfig, NetworkConfig,
                             SharemindCodeGenConfig, SparkConfig)


def setup(conf: Dict):
    pid = conf["pid"]
    hdfs_node_name = conf["spark"]["hdfs"]["node_name"]
    hdfs_root = conf["spark"]["hdfs"]["root"]
    spark_master_url = conf["spark"]["master_url"]

    workflow_name = conf["workflow_name"]

    # Backend code-generation configs: Sharemind (MPC) and Spark
    sm_config = SharemindCodeGenConfig(conf["code_path"])
    spark_config = SparkConfig(spark_master_url)

    conclave_config = CodeGenConfig(workflow_name) \
        .with_sharemind_config(sm_config) \
        .with_spark_config(spark_config)

    conclave_config.code_path = conf["code_path"] + workflow_name
    # Input and output both live under the same HDFS directory for this workflow
    conclave_config.input_path = "hdfs://{}/{}/{}".format(
        hdfs_node_name, hdfs_root, conf["name"])
    conclave_config.output_path = "hdfs://{}/{}/{}".format(
        hdfs_node_name, hdfs_root, conf["name"])
    conclave_config.pid = pid
    conclave_config.name = workflow_name
    conclave_config.all_pids = [int(p) for p in conf["all_pids"]]

    # Host/port endpoints of the participating parties, taken from the Sharemind section
    network_config = NetworkConfig(conf["sharemind"]["parties"], pid)

    conclave_config.with_network_config(network_config)

    return conclave_config
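For reference, below is a minimal sketch of the conf dictionary that this setup() reads, inferred purely from the keys it accesses. All concrete values are illustrative, and the shape of "parties" is an assumption modeled on the literal network dict shown in Example #3.

conf = {
    "pid": 1,
    "all_pids": [1, 2, 3],
    "workflow_name": "my-workflow",          # illustrative name
    "name": "my-dataset",                    # HDFS subdirectory used for input/output
    "code_path": "/mnt/shared/",
    "spark": {
        "master_url": "spark://master:7077",
        "hdfs": {"node_name": "namenode:9000", "root": "data"},
    },
    "sharemind": {
        # shape assumed from the literal network_config in Example #3
        "parties": {
            1: {"host": "host-1", "port": 9001},
            2: {"host": "host-2", "port": 9002},
            3: {"host": "host-3", "port": 9003},
        },
    },
}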
Example #2
# Imports assumed to live in conclave.config, as in the other examples; adjust if needed
from conclave.config import (CodeGenConfig, JiffConfig, NetworkConfig,
                             OblivcConfig, SparkConfig)


def setup(conf: dict):
    # GENERAL
    pid = conf["user_config"]["pid"]
    workflow_name = conf["user_config"]["workflow_name"]
    all_pids = conf["user_config"]['all_pids']
    use_leaky = conf["user_config"]["leaky_ops"]

    conclave_config = CodeGenConfig(workflow_name)

    # SPARK
    try:
        spark_avail = conf["backends"]["spark"]["available"]
        if spark_avail:
            spark_master_url = conf["backends"]["spark"]["master_url"]
            spark_config = SparkConfig(spark_master_url)
            conclave_config.with_spark_config(spark_config)
    except KeyError:
        # Spark section missing or incomplete; run without a Spark backend
        pass

    # OBLIV-C
    try:
        oc_avail = conf["backends"]["oblivc"]["available"]
        if oc_avail:
            oc_path = conf["backends"]["oblivc"]["oc_path"]
            ip_port = conf["backends"]["oblivc"]["ip_port"]
            oc_config = OblivcConfig(oc_path, ip_port)
            conclave_config.with_oc_config(oc_config)
    except KeyError:
        # Obliv-C section missing or incomplete; skip this backend
        pass

    # JIFF
    try:
        jiff_avail = conf["backends"]["jiff"]["available"]
        if jiff_avail:
            jiff_path = conf["backends"]["jiff"]["jiff_path"]
            party_count = conf["backends"]["jiff"]["party_count"]
            server_ip = conf["backends"]["jiff"]["server_ip"]
            server_port = conf["backends"]["jiff"]["server_port"]
            jiff_config = JiffConfig(jiff_path, party_count, server_ip,
                                     server_port)
            conclave_config.with_jiff_config(jiff_config)
    except KeyError:
        # JIFF section missing or incomplete; skip this backend
        pass

    # NET
    hosts = conf["net"]["parties"]
    net_config = NetworkConfig(hosts, pid)
    conclave_config.with_network_config(net_config)

    conclave_config.pid = pid
    conclave_config.all_pids = all_pids
    conclave_config.name = workflow_name
    conclave_config.use_leaky_ops = use_leaky

    conclave_config.code_path = conf["user_config"]["paths"]["code_path"]
    conclave_config.output_path = conf["user_config"]["paths"]["output_path"]
    conclave_config.input_path = conf["user_config"]["paths"]["input_path"]

    return conclave_config
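Again for reference, a minimal sketch of the conf dictionary this more general setup() expects, inferred from the keys it reads. Values are illustrative, each backend block is optional (a missing block is simply skipped by the KeyError handlers above), and the exact shape of the "net"/"parties" entry is an assumption.

conf = {
    "user_config": {
        "pid": 1,
        "all_pids": [1, 2],
        "workflow_name": "my-workflow",      # illustrative
        "leaky_ops": False,
        "paths": {
            "code_path": "/tmp/code/",
            "input_path": "/tmp/input/",
            "output_path": "/tmp/output/",
        },
    },
    "backends": {
        # each backend block is optional; absent blocks are skipped above
        "spark": {"available": True, "master_url": "spark://master:7077"},
        "oblivc": {"available": False, "oc_path": "/path/to/oblivc",
                   "ip_port": "10.0.0.1:9000"},
        "jiff": {"available": False, "jiff_path": "/path/to/jiff",
                 "party_count": 2, "server_ip": "10.0.0.1", "server_port": 9004},
    },
    "net": {
        # shape assumed; host/port info for every party
        "parties": [{"host": "host-1", "port": 9001},
                    {"host": "host-2", "port": 9002}],
    },
}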
Example #3
    # HDFS root directory and Spark master URL are taken from the command line
    hdfs_root = sys.argv[3]
    spark_master_url = sys.argv[4]

    workflow_name = "job-" + str(pid)
    sm_config = SharemindCodeGenConfig("/mnt/shared")
    spark_config = SparkConfig(spark_master_url)
    conclave_config = CodeGenConfig(workflow_name) \
        .with_sharemind_config(sm_config) \
        .with_spark_config(spark_config)
    conclave_config.code_path = "/mnt/shared/" + workflow_name
    conclave_config.input_path = "hdfs://{}/{}/taxi".format(
        hdfs_namenode, hdfs_root)
    conclave_config.output_path = "hdfs://{}/{}/taxi".format(
        hdfs_namenode, hdfs_root)
    conclave_config.pid = pid
    conclave_config.name = workflow_name
    # Network endpoints given as a literal dict rather than a NetworkConfig object
    network_config = {
        "pid": pid,
        "parties": {
            1: {
                "host": "ca-spark-node-0",
                "port": 9001
            },
            2: {
                "host": "cb-spark-node-0",
                "port": 9002
            },
            3: {
                "host": "cc-spark-node-0",
                "port": 9003
            }