Example #1
def bin_op_rh_const_test(protocol, task_id, tf_op, x_init, y_init, expect_val):
    Result = True
    local_g = tf.Graph()
    with local_g.as_default():
        X = tf.Variable(x_init)
        Z = tf_op(X, y_init)
        rv_Z = rtt.SecureReveal(Z)
        init = tf.compat.v1.global_variables_initializer()

        try:
            rtt.activate(protocol, task_id=task_id)
            config = tf.ConfigProto(inter_op_parallelism_threads=16,
                                    intra_op_parallelism_threads=16)
            with tf.Session(task_id=task_id, config=config) as sess:
                sess.run(init)
                real_Z = sess.run(rv_Z)
                res = check_mpc_results(real_Z, expect_val)
                if not res:
                    Result = False
            rtt.deactivate(task_id=task_id)
        except Exception as e:
            print(str(e))
            Result = False

    return Result
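For context, a hypothetical call to this helper could exercise rtt.SecureAdd with a right-hand constant; check_mpc_results is the comparison helper used above, which is not shown in this extract:

# hypothetical usage sketch; the expected values follow plain arithmetic
ok = bin_op_rh_const_test("SecureNN", "task-0", rtt.SecureAdd,
                          [1.0, 2.0], 3.0, [4.0, 5.0])
print("SecureAdd (rh const) passed:", ok)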
Example #2
def test(task_id):
    rtt.py_protocol_handler.set_loglevel(0)
    np.set_printoptions(suppress=True)

    rtt.activate("SecureNN", task_id=task_id)
    print('begin get io wrapper', task_id)
    node_id = rtt.get_current_node_id(task_id=task_id)
    print('end get io wrapper', task_id)
    dg = tf.Graph()
    with dg.as_default():
        # Get private data from P0 (input w), P1 (input x) and P2 (input y)
        w = tf.Variable(rtt.private_input(0, [[1, 2], [2, 3]],
                                          task_id=task_id))
        x = tf.Variable(rtt.private_input(1, [[1, 2], [2, 3]],
                                          task_id=task_id))
        y = tf.Variable(rtt.private_input(2, [[1, 2], [2, 3]],
                                          task_id=task_id))

        # Define matmul operation
        res = tf.matmul(tf.matmul(w, x), y)
        init = tf.global_variables_initializer()
        config = tf.ConfigProto(inter_op_parallelism_threads=16,
                                intra_op_parallelism_threads=16)

        with tf.Session(task_id=task_id, config=config) as sess:
            sess.run(init)
            #rW, rb = sess.run([reveal_W, reveal_b])
            #print("init weight:{} \nbias:{}".format(rW, rb))

            #Y_pred = sess.run(reveal_Y, feed_dict={X: real_X, Y: real_Y})
            #print("Y_pred:", Y_pred)
            sess.run(res)

        print(rtt.get_perf_stats(pretty=True, task_id=task_id))
        rtt.deactivate(task_id=task_id)
Example #3
def test_const_mul():
    rtt.activate("SecureNN")
    in1 = tf.Variable([1, 2], name="a")
    in2 = tf.constant([1, 2], name="b")
    ret = tf.multiply(in1, in2)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        result = sess.run(ret)

    print(result)
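Note that under an MPC protocol the value printed above is a local share rather than the plaintext product. A minimal sketch of adding a reveal inside test_const_mul, mirroring the SecureReveal usage in the other examples:

    # sketch only: reveal the ciphertext product ([1, 4] for these inputs)
    reveal_ret = rtt.SecureReveal(ret)
    with tf.Session() as sess:
        sess.run(init)
        print(sess.run(reveal_ret))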
Example #4
def test_add_grad(X, Y, out_g, protocol="Helix"):
    cb.activate(protocol)

    global sess
    if sess is not None:
        sess.close()

    # ===========================
    # init global var
    # ===========================
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session()
    sess.run(init)

    # ===========================
    # run mpc add grad
    # ===========================
    print("===========================")
    print("run mpc add(X + Y) grad")
    mpc_Z = cb.SecureAdd(X, Y)
    mpc_g = tf.gradients(
        mpc_Z,
        [common.get_var_from_rtt_tensor(X),
         common.get_var_from_rtt_tensor(Y)])
    print(sess.run(mpc_g))
    print("===========================")

    # ===========================
    # reveal value
    # ===========================
    mpc_out_g = []
    for i in range(len(mpc_g)):
        print("---------- Reveal mpcadd grad ------------")
        mpc_out_g.append(sess.run(cb.SecureReveal(mpc_g[i])))
        print(mpc_out_g)
        print("------------------------------------------")

    # ===========================
    # check mpc add grads value
    # ===========================
    global res_flag
    res_flag = common.check_mpc_op_grads(out_g, mpc_out_g)
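test_add_grad expects X and Y to already be RTT tensors; a hypothetical setup, assuming the module-level environment of Example #17 below (cb, np and common imported, protocol activated), might be:

# hypothetical inputs for the helper above
xa = tf.Variable(cb.private_input(0, np.array([[1.0, 2.0]])))
ya = tf.Variable(cb.private_input(1, np.array([[3.0, 4.0]])))
# d(X + Y)/dX and d(X + Y)/dY are both all-ones for an element-wise add
expected_g = [np.ones((1, 2)), np.ones((1, 2))]
test_add_grad(xa, ya, expected_g, protocol="Helix")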
Example #5
def test_sigmoidcrocssentropy_grad(logits, labels, out_g, protocol="Helix"):
    rst.activate(protocol)

    global sess
    if sess is not None:
        sess.close()

    # ===========================
    # init global var
    # ===========================
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session()
    sess.run(init)

    # ===========================
    # run mpc SCE grad:
    # ===========================
    print("===========================")
    print("run mpc SCE(X,Y) grad")
    Z_mpc = rst.secure_sigmoid_cross_entropy_with_logits(logits=logits,
                                                         labels=labels)
    mpc_g = tf.gradients(Z_mpc, [
        common.get_var_from_rtt_tensor(logits),
        common.get_var_from_rtt_tensor(labels)
    ])
    print(sess.run(mpc_g))
    print("===========================")

    # ===========================
    # check mpcSCE grads value
    # ===========================
    mpc_out_g = []
    for i in range(len(mpc_g)):
        print("---------- Reveal mpcSCE grad ------------")
        mpc_out_g.append(sess.run(rst.SecureReveal(mpc_g[i])))
        print(mpc_out_g)
        print("------------------------------------------")

    global res_flag
    res_flag = res_flag and common.check_mpc_op_grads(out_g, mpc_out_g)
Example #6
def test_protocol(protocol_name="SecureNN"):
    rst.activate(protocol_name)
    PRI_LOGITS = rst.private_input(0, np_a)
    PRI_LABELS = rst.private_input(1, np_b)

    PRI_logits = tf.Variable(PRI_LOGITS, dtype=tf.string)
    PRI_labels = tf.Variable(PRI_LABELS, dtype=tf.string)

    init = tf.compat.v1.global_variables_initializer()
    PRI_sess = tf.compat.v1.Session()
    PRI_sess.run(init)

    start_t = time.time()
    result_mpc = rst.secure_sigmoid_cross_entropy_with_logits(
        logits=PRI_logits, labels=PRI_labels)
    PRI_sess.run(result_mpc)
    end_t = time.time()
    reveal_op = rst.SecureReveal(result_mpc)
    xcc = PRI_sess.run(reveal_op)
    print(xcc)
    print("{} elapsed: {} ".format(protocol_name, end_t - start_t))
    rst.deactivate()
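np_a and np_b are module-level arrays that this extract does not show; a hypothetical definition and invocation (assuming rst, np and time are imported) might look like:

# hypothetical globals for the snippet above
np_a = np.array([[0.5, -1.2], [2.0, 0.0]])   # logits
np_b = np.array([[1.0, 0.0], [1.0, 0.0]])    # labels in {0, 1}
test_protocol("SecureNN")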
Example #7
def test_max_grad(X, axis, out_g, protocol="Helix"):
    cb.activate(protocol)

    global sess
    if sess is not None:
        sess.close()

    # ===========================
    # init global var
    # ===========================
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session()
    sess.run(init)

    # ===========================
    # run mpc max grad
    # ===========================
    print("===========================")
    print("# run mpc max(X) grad, axis=", axis)
    mpc_Y = cb.SecureMax(X, axis=axis)
    print(sess.run(cb.SecureReveal(mpc_Y)))
    mpc_g = tf.gradients(mpc_Y, [common.get_var_from_rtt_tensor(X)])
    print(sess.run(mpc_g))
    print("===========================")

    # ===========================
    # check mpcmax grads value
    # ===========================
    mpc_out_g = []
    for i in range(len(mpc_g)):
        print("---------- Reveal mpcmax grad ------------")
        mpc_out_g.append(sess.run(cb.SecureReveal(mpc_g[i])))
        print(mpc_out_g)
        print("------------------------------------------")

    global res_flag
    res_flag = res_flag and common.check_mpc_op_grads(out_g, mpc_out_g)
Example #8
#!/usr/bin/env python3
# rosetta LR, sample-based (horizontal federated learning)
import latticex.rosetta as rtt  # difference from tensorflow
import math
import os
import sys
import csv
import tensorflow as tf
import numpy as np
import pandas as pd
import time
import argparse
task_id = 'task-id'
rtt.set_backend_loglevel(0)
rtt.activate("SecureNN", task_id=task_id)
node_id = rtt.get_current_node_id(task_id=task_id)
print("node_id:", node_id)
np.set_printoptions(suppress=True)
np.random.seed(0)

EPOCHES = 1
BATCH_SIZE = 32
learning_rate = 0.03125
DIM_NUM = 11
ROW_NUM = 1279

file_x = ""
file_y = ""
filex_name = "cls_train_x.csv"
filey_name = "cls_train_y.csv"
Example #9
import os
import latticex.rosetta as rtt  # difference from tensorflow
import tensorflow as tf
import numpy as np
from util import read_dataset

np.set_printoptions(suppress=True)

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

np.random.seed(0)

EPOCHES = 10
BATCH_SIZE = 16
learning_rate = 0.0002

task_id = 'task-id'
rtt.activate("Helix", task_id = task_id)
node_id = rtt.get_current_node_id(task_id = task_id)

# real data
# ######################################## difference from tensorflow
file_x = '../dsets/' + node_id + "/cls_test_x.csv"
file_y = '../dsets/' + node_id + "/cls_test_y.csv"
real_X, real_Y = rtt.PrivateDataset(data_owner=(0, 'p9'), label_owner='p9',
                                    task_id=task_id).load_data(file_x, file_y,
                                                               header=None)
# ######################################## difference from tensorflow
DIM_NUM = real_X.shape[1]

X = tf.placeholder(tf.float64, [None, DIM_NUM])
Y = tf.placeholder(tf.float64, [None, 1])
print(X)
print(Y)
Example #10
import tensorflow as tf
import latticex.rosetta as rst
import numpy as np

rst.activate("SecureNN")
X = tf.placeholder(tf.float64, [2, 2])
Y = tf.placeholder(tf.float64, [2, 1])

# initialize W & b
W = tf.Variable(tf.zeros([2, 1], dtype=tf.float64))
b = tf.Variable(tf.zeros([1], dtype=tf.float64))

# predict
pred_Y = tf.sigmoid(tf.matmul(X, W) + b)

# loss
logits = tf.matmul(X, W) + b
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=logits)

# optimizer
try:
    # optimizer
    train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)

        # train
        bX = np.array([['1', '2'], ['1', '2']])
        bY = np.array([['2'], ['1']])
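The extract is cut off here; following the training pattern in the other examples on this page, the loop would presumably feed the batch into the optimizer, roughly:

# sketch of the missing training step (would sit inside the Session block above)
for epoch in range(100):
    sess.run(train, feed_dict={X: bX, Y: bY})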
Example #11
import os
print("PWD:", os.getcwd())

#import unittest

import numpy as np
np.set_printoptions(suppress=True)

import tensorflow as tf

# our package
import latticex.rosetta as cb
#cb.activate("Helix")
cb.activate("SecureNN")

party_ID = cb.get_party_id()
# two initial values with double type.
num_a = np.array([[1, 2], [3, 4]], dtype=np.float_)
num_b = np.array([[10.1, 20.02], [30.003, 40.0004]], dtype=np.float_)

# the precision requirement of saving and restoring a float number
PRECISION = 1.0 / 1000

xa = tf.Variable(num_a, dtype=tf.double)
xb = tf.Variable(num_b, dtype=tf.double)

init_op = tf.compat.v1.global_variables_initializer()

print("========Local init input values================")
with tf.compat.v1.Session() as pre_sess:
    pre_sess.run(init_op)
Example #12
import os
import csv
import latticex.rosetta as rtt
import tensorflow as tf
import numpy as np
from util import read_dataset

np.set_printoptions(suppress=True)

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

np.random.seed(0)

EPOCHES = 10
BATCH_SIZE = 16
learning_rate = 0.0002

rtt.activate("Helix")
rtt.set_saver_model(['p0', 'p1', 'p2', 'p9'])
#rtt.set_saver_model([])
#rtt.set_saver_model(['p0', 'p1', 'p2'])
print('saver model:', rtt.get_saver_model())
node_id = rtt.get_current_node_id()

# real data
# ######################################## difference from tensorflow
file_x = '../dsets/' + node_id + "/reg_train_x.csv"
file_y = '../dsets/' + node_id + "/reg_train_y.csv"
real_X, real_Y = rtt.PrivateDataset(data_owner=(0, 'p9'),
                                    label_owner=1).load_data(file_x,
                                                             file_y,
                                                             header=None)
# ######################################## difference from tensorflow
Example #13
def test_all_ops(prot):
    print("--------------------- begin {}-protocol testing!  ----------------".
          format(prot))
    rtt.activate(prot)

    # ###### binary ops
    # print("\n\n-------------    test binary op with two variables  ----------")
    # test_binary_op("add") # ok
    # test_binary_op("sub") # ok
    # test_binary_op("div") # ok
    # test_binary_op("floordiv") # ok
    # test_binary_op("mul") # ok
    # test_binary_op("matmul") # ok
    # return

    # test_snn_pow() # ok
    # test_binary_op("less") # ok
    # test_binary_op("less_equal") # ok
    # test_binary_op("not_equal") # ok
    # test_binary_op("equal") # ok
    # test_binary_op("greater") # ok
    # test_binary_op("greater_equal") # ok

    # ###### binary ops with rh_is_const=True, mark [OK]
    # print("\n\n-------------    test binary op with const rh_is_const=True  ----------")
    # test_binary_op("add", False, True)
    # test_binary_op("sub", False, True)
    # test_binary_op("div", False, True)
    # test_binary_op("mul", False, True)
    # test_binary_op("matmul", False, True)
    # test_binary_op("less", False, True)
    # test_binary_op("less_equal", False, True)
    # test_binary_op("not_equal", False, True)
    # test_binary_op("equal", False, True)
    # test_binary_op("greater", False, True)
    # test_binary_op("greater_equal", False, True)

    # ###### binary ops with lh_is_const=True, mark [ok]
    # print("\n\n-------------    test binary op with const lh_is_const=True  ----------")
    # test_binary_op("add", True, False)
    # test_binary_op("sub", True, False)
    # test_binary_op("div", True, False)
    # test_binary_op("mul", True, False)
    # test_binary_op("matmul", True, False)
    # test_binary_op("less", True, False)
    # test_binary_op("less_equal", True, False)
    # test_binary_op("not_equal", True, False)
    # test_binary_op("equal", True, False)
    # test_binary_op("greater", True, False)
    # test_binary_op("greater_equal", True, False)

    # ###### binary ops with lh_is_const=True, rh_is_const=True, mark [todo]
    # print("\n\n-------------    test binary op with const,const  ----------")
    # test_binary_op("add", True, True, False)
    # test_binary_op("sub", True, True, False)
    # test_binary_op("div", True, True, False)
    # test_binary_op("mul", True, True, False)
    # test_binary_op("matmul", True, True, False)
    # test_binary_op("less", True, True, False)
    # test_binary_op("less_equal", True, True, False)
    # test_binary_op("not_equal", True, True, False)
    # test_binary_op("equal", True, True, False)
    # test_binary_op("greater", True, True, False)
    # test_binary_op("greater_equal", True, True, False)

    # ###### unary ops
    # test_unary_op("negative") # ok
    # test_unary_op("square") # ok
    # test_unary_op("abs") # ok
    # test_unary_op("abs_prime") # ok
    # test_unary_op("log")
    # test_unary_op("log1p")

    # test_reduce_op("reduce_max", 1) # reduction_indices=1, ok
    # test_reduce_op("reduce_min", 1) # ok
    # test_reduce_op("reduce_mean", 1) # ok
    # test_reduce_op("reduce_sum", 1) # ok

    # test_reduce_op("reduce_max", 0) # reduction_indices=1, ok
    # test_reduce_op("reduce_min", 0) # ok
    # test_reduce_op("reduce_mean", 0) # ok
    # test_reduce_op("reduce_sum", 0) # ok

    # test_reduce_op("reduce_max", -1) # reduction_indices=1, ok
    # test_reduce_op("reduce_min", -1) # ok
    # test_reduce_op("reduce_mean", -1) # ok
    # test_reduce_op("reduce_sum", -1) # ok

    # test_accumutive_op(rtt.SecureAddN, 2) # ok

    # test_nn_op("relu") # ok
    # test_nn_op("relu_prime") # ok
    # test_nn_op("sigmoid_cross_entropy_with_logits", "with_logits") # ok
    # test_nn_op("sigmoid") # ok

    # test io ops
    test_private_input_op("private_input")

    print("--------------------- end {}-protocol testing!  ----------------".
          format(prot))
Example #14
import tensorflow as tf
import latticex.rosetta as rst


rst.activate()
X = tf.Variable(1.0, name='x')
Y = tf.Variable(2.0, name='y')
Z = tf.multiply(X, Y)
init = tf.global_variables_initializer()


try:
    with tf.Session() as sess:
        sess.run(init)
        for i in range(5):
            print(sess.run(Z))
    print("Pass")
except Exception as ex:
    print("Exception at:%s" %ex)
    print("Fail")


rst.deactivate()
Writer = tf.summary.FileWriter("log/pridict", tf.get_default_graph())
Writer.close()


Example #15
def test(task_id):
    rtt.py_protocol_handler.set_loglevel(0)
    np.set_printoptions(suppress=True)

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    np.random.seed(0)

    EPOCHES = 10
    BATCH_SIZE = 16
    learning_rate = 0.0002

    rtt.activate("SecureNN", task_id=task_id)
    node_id = rtt.get_current_node_id(task_id=task_id)
    dg = tf.Graph()
    with dg.as_default():

        # real data
        # ######################################## difference from tensorflow
        file_x = '../dsets/' + node_id + "/reg_train_x.csv"
        file_y = '../dsets/' + node_id + "/reg_train_y.csv"
        real_X, real_Y = rtt.PrivateDataset(data_owner=(0, 1),
                                            label_owner=1,
                                            task_id=task_id).load_data(
                                                file_x, file_y, header=None)
        # ######################################## difference from tensorflow
        DIM_NUM = real_X.shape[1]

        X = tf.placeholder(tf.float64, [None, DIM_NUM])
        Y = tf.placeholder(tf.float64, [None, 1])
        print(X)
        print(Y)

        # initialize W & b
        W = tf.Variable(tf.zeros([DIM_NUM, 1], dtype=tf.float64))
        b = tf.Variable(tf.zeros([1], dtype=tf.float64))
        print(W)
        print(b)

        # predict
        pred_Y = tf.matmul(X, W) + b
        print(pred_Y)

        # loss
        loss = tf.square(Y - pred_Y)
        loss = tf.reduce_mean(loss)
        print(loss)

        # optimizer
        train = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
        print(train)

        init = tf.global_variables_initializer()
        print(init)

        # ########### for test, reveal
        reveal_W = rtt.SecureReveal(W)
        reveal_b = rtt.SecureReveal(b)
        reveal_Y = rtt.SecureReveal(pred_Y)
        # ########### for test, reveal

        config = tf.ConfigProto(inter_op_parallelism_threads=16,
                                intra_op_parallelism_threads=16)
        with tf.Session(task_id=task_id, config=config) as sess:
            sess.run(init)
            #rW, rb = sess.run([reveal_W, reveal_b])
            #print("init weight:{} \nbias:{}".format(rW, rb))

            # train
            BATCHES = math.ceil(len(real_X) / BATCH_SIZE)
            for e in range(EPOCHES):
                for i in range(BATCHES):
                    bX = real_X[(i * BATCH_SIZE):(i + 1) * BATCH_SIZE]
                    bY = real_Y[(i * BATCH_SIZE):(i + 1) * BATCH_SIZE]
                    print('*' * 80, task_id)
                    sess.run(train, feed_dict={X: bX, Y: bY})
                    print('#' * 80, task_id)

                    j = e * BATCHES + i
                    if j % 50 == 0 or (j == EPOCHES * BATCHES - 1
                                       and j % 50 != 0):
                        pass
                        #rW, rb = sess.run([reveal_W, reveal_b])
                        #print("I,E,B:{:0>4d},{:0>4d},{:0>4d} weight:{} \nbias:{}".format(
                        #    j, e, i, rW, rb))

            # predict
            #Y_pred = sess.run(reveal_Y, feed_dict={X: real_X, Y: real_Y})
            #print("Y_pred:", Y_pred)

        print(rtt.get_perf_stats(pretty=True, task_id=task_id))
        rtt.deactivate(task_id=task_id)
Example #16
#!/usr/bin/env python3

# Import rosetta package
import latticex.rosetta as rtt
import tensorflow as tf

# Attention!
# This is just a demonstration of how to integrate a new protocol.
# NEVER USE THIS PROTOCOL IN A PRODUCTION ENVIRONMENT!
rtt.activate("Naive")

# Get private data from P0 and P1
matrix_a = tf.Variable(rtt.private_console_input(0, shape=(3, 2)))
matrix_b = tf.Variable(rtt.private_console_input(1, shape=(3, 2)))

# Just use the native tf.multiply operation.
cipher_result = tf.multiply(matrix_a, matrix_b)

# Start execution
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Take a glance at the ciphertext
    cipher_a = sess.run(matrix_a)
    print('local shared matrix a:\n', cipher_a)
    cipher_result_v = sess.run(cipher_result)
    print('local ciphertext result:\n', cipher_result_v)
    # Get the result of Rosetta multiply
    print('plaintext result:\n', sess.run(rtt.SecureReveal(cipher_result)))

rtt.deactivate()
Example #17
import latticex.rosetta as cb

import tensorflow as tf
import sys, os
import numpy as np
np.set_printoptions(suppress=True)

protocol = "Helix"

if "ROSETTA_TEST_PROTOCOL" in os.environ.keys():
    print("***** test_cases use ", os.environ["ROSETTA_TEST_PROTOCOL"])
    protocol = os.environ["ROSETTA_TEST_PROTOCOL"]
else:
    print("***** test_cases use default helix protocol ")
cb.activate(protocol)

sess = None

xaa = np.array([[1.92, 0.2, 3], [-0.43, .0091, 1.3]])
xbb = np.array([[.2, 0.3], [-2, .3], [-1.111, -0.3]])
xa = tf.Variable(cb.private_input(0, xaa))
xb = tf.Variable(cb.private_input(1, xbb))

z1 = cb.SecureMatMul(xa, xb)
z0 = cb.SecureReveal(z1)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(z1))
    print(sess.run(z0))
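As a quick sanity check, the revealed result can be compared with the plaintext product computed by NumPy, as Example #20 does:

# plaintext reference for the secure matmul above
print('numpy.matmul:', np.matmul(xaa, xbb))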
Example #18
#!/usr/bin/env python3

# Import rosetta package
import latticex.rosetta as rtt
import tensorflow as tf

# You can activate any backend protocol; here we use Mystique.
rtt.activate("Mystique")

# P0 is the Prover, providing all the (private) witnesses, and
# P1 is the Verifier.
matrix_a = tf.Variable(rtt.private_console_input(0, shape=(3, 2)))
matrix_b = tf.Variable(rtt.private_console_input(0, shape=(2, 3)))

# Just use the native tf.matmul operation.
cipher_result = tf.matmul(matrix_a, matrix_b)

# Start execution
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Take a glance at the ciphertext
    cipher_result_v = sess.run(cipher_result)
    print('local ciphertext result:', cipher_result_v)
    # Get the result of Rosetta matmul
    print('plaintext result:', sess.run(rtt.SecureReveal(cipher_result)))

rtt.deactivate()
Example #19
#!/usr/bin/env python3

import latticex.rosetta as rtt
import tensorflow as tf
import sys
import numpy as np
np.set_printoptions(suppress=True)

protocol = "Helix"
rtt.activate(protocol)

input0 = rtt.private_input(0, 1.234)
input1 = rtt.private_input(1, 5.432)
input2 = rtt.private_input(2, 2.222)
print('input0:', input0)
print('input1:', input1)
print('input2:', input2)

i0 = tf.Variable(input0)
i1 = tf.Variable(input1)
i2 = tf.Variable(input2)

ii = rtt.SecureAdd(i0, i1)
ii = rtt.SecureAdd(ii, i2)
ir_add = rtt.SecureReveal(ii)  # i0 + i1 + i2

init = tf.global_variables_initializer()
with tf.Session() as sess1:
    sess1.run(init)
    print('rosetta add:', sess1.run(ir_add))
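For reference, the plaintext sum of the three inputs is 1.234 + 5.432 + 2.222 = 8.888, so the revealed value should be approximately 8.888 (up to fixed-point precision):

# plaintext reference for the secure addition above
print('expected:', 1.234 + 5.432 + 2.222)  # 8.888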
Example #20
#!/usr/bin/env python3
# Example of Matmul for Rosetta
import latticex.rosetta as rtt  # import Rosetta
import tensorflow as tf
import numpy as np

rtt.activate("SecureNN")  # activate the SecureNN protocol execution environment
x = tf.Variable(rtt.private_input(0,
                                  [[1, 2], [2, 3]]))  # Alice private_input x
y = tf.Variable(rtt.private_input(1, [[1, 2], [2, 3]]))  # Bob private_input y
res = tf.matmul(x, y)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    res = sess.run(res)
    print('Rosetta.matmul:', sess.run(rtt.SecureReveal(res))
          )  # ret: [[b'14.000000' b'20.000000'] [b'20.000000' b'29.000000']]
    print('numpy.matmul:', np.matmul([[1, 2], [2, 3]], [[1, 2], [2, 3]]))
Example #21
#!/usr/bin/env python3
# rosetta LR, sample-based (horizontal federated learning)
import argparse
import csv
import math
import os
import sys
import time

import latticex.rosetta as rtt  # difference from tensorflow
import numpy as np
import pandas as pd
import tensorflow as tf

rtt.activate("SecureNN")
mpc_player_id = rtt.py_protocol_handler.get_party_id()
print("mpc_player_id:", mpc_player_id)

np.set_printoptions(suppress=True)
np.random.seed(0)

EPOCHES = 1
BATCH_SIZE = 32
learning_rate = 0.03125
DIM_NUM = 11
ROW_NUM = 1279

file_x = ""
file_y = ""
filex_name = "cls_train_x.csv"
filey_name = "cls_train_y.csv"
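The extract stops after defining the file names; following the rtt.PrivateDataset pattern used in the other LR examples on this page, a hypothetical continuation that loads the per-party CSVs could look like:

# hypothetical continuation; paths and owners are illustrative only
file_x = "../dsets/P" + str(mpc_player_id) + "/" + filex_name
file_y = "../dsets/P" + str(mpc_player_id) + "/" + filey_name
real_X, real_Y = rtt.PrivateDataset(data_owner=(0, 1),
                                    label_owner=1).load_data(file_x, file_y,
                                                             header=None)
DIM_NUM = real_X.shape[1]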
Example #22
        "SAVER_MODE": 7,
        "SERVER_CERT": "certs/server-nopass.cert",
        "SERVER_PRIKEY": "certs/server-prikey",
        "SERVER_PRIKEY_PASSWORD": "******"
    }
}"""
json_config = json.loads(config_json_str)
_parser = argparse.ArgumentParser(description="LatticeX Rosetta")
_parser.add_argument('--party_id', type=int, help="Party ID",
                     required=False, default=-1, choices=[0, 1, 2])
_args, _unparsed = _parser.parse_known_args()
_party_id = _args.party_id
json_config["PARTY_ID"] = _party_id

json_config_str = json.dumps(json_config, indent=4)
rtt.activate("Helix", json_config_str)

init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
print("cipher add result: ", sess.run(sum_res))
sum_plain = rtt.SecureReveal(sum_res)
print("plain result: ", sess.run(sum_plain))

print("*" * 16 + "Test Case 3: activate[ with wrong name]" + "*" * 16)
rtt.activate("SecureNNX")

print("*" * 16 + "Test Case 4: activate SecureNN" + "*" * 16)
curr_config_str = rtt.get_protocol_config()
print("last config str:", curr_config_str)
new_config = json.loads(curr_config_str)