def test(task_id):
    rtt.py_protocol_handler.set_loglevel(0)
    np.set_printoptions(suppress=True)

    rtt.activate("SecureNN", task_id=task_id)
    print('begin get io wrapper', task_id)
    node_id = rtt.get_current_node_id(task_id=task_id)
    print('end get io wrapper', task_id)

    dg = tf.Graph()
    with dg.as_default():
        # Get private data from party 0 (input w), party 1 (input x) and party 2 (input y)
        w = tf.Variable(rtt.private_input(0, [[1, 2], [2, 3]], task_id=task_id))
        x = tf.Variable(rtt.private_input(1, [[1, 2], [2, 3]], task_id=task_id))
        y = tf.Variable(rtt.private_input(2, [[1, 2], [2, 3]], task_id=task_id))

        # Define the chained matmul operation
        res = tf.matmul(tf.matmul(w, x), y)
        init = tf.global_variables_initializer()

        config = tf.ConfigProto(inter_op_parallelism_threads=16,
                                intra_op_parallelism_threads=16)
        with tf.Session(task_id=task_id, config=config) as sess:
            sess.run(init)
            sess.run(res)
            print(rtt.get_perf_stats(pretty=True, task_id=task_id))

    rtt.deactivate(task_id=task_id)
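# A hedged usage sketch for the task-scoped matmul test above; the concrete
# task id string is an illustrative assumption. Since all three private
# inputs equal [[1, 2], [2, 3]], the revealed product would be that matrix
# cubed, i.e. [[21, 34], [34, 55]].
if __name__ == "__main__":
    test("matmul-task-0")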
def bin_op_rh_const_test(protocol, task_id, tf_op, x_init, y_init, expect_val):
    result = True
    local_g = tf.Graph()
    with local_g.as_default():
        X = tf.Variable(x_init)
        # The right-hand operand is a public (non-private) constant
        Z = tf_op(X, y_init)
        rv_Z = rtt.SecureReveal(Z)
        init = tf.compat.v1.global_variables_initializer()

        try:
            rtt.activate(protocol, task_id=task_id)
            config = tf.ConfigProto(inter_op_parallelism_threads=16,
                                    intra_op_parallelism_threads=16)
            with tf.Session(task_id=task_id, config=config) as sess:
                sess.run(init)
                real_Z = sess.run(rv_Z)
                if not check_mpc_results(real_Z, expect_val):
                    result = False
            rtt.deactivate(task_id=task_id)
        except Exception as e:
            print(str(e))
            result = False

    return result
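# A hedged usage sketch of the helper above, checking a secure element-wise
# multiply against a public right-hand constant. The protocol name, task id,
# and test values are illustrative assumptions; check_mpc_results is assumed
# to compare the revealed values against expect_val.
ok = bin_op_rh_const_test("SecureNN", "bin-op-task-0", tf.multiply,
                          [[1.0, 2.0], [3.0, 4.0]], 2.0,
                          [[2.0, 4.0], [6.0, 8.0]])
print("bin_op_rh_const_test(multiply):", "Pass" if ok else "Fail")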
def test_protocol(protocol_name="SecureNN"):
    rst.activate(protocol_name)
    PRI_LOGITS = rst.private_input(0, np_a)
    PRI_LABELS = rst.private_input(1, np_b)
    PRI_logits = tf.Variable(PRI_LOGITS, dtype=tf.string)
    PRI_labels = tf.Variable(PRI_LABELS, dtype=tf.string)
    init = tf.compat.v1.global_variables_initializer()
    PRI_sess = tf.compat.v1.Session()
    PRI_sess.run(init)

    start_t = time.time()
    result_mpc = rst.secure_sigmoid_cross_entropy_with_logits(
        logits=PRI_logits, labels=PRI_labels)
    PRI_sess.run(result_mpc)
    end_t = time.time()

    reveal_op = rst.SecureReveal(result_mpc)
    xcc = PRI_sess.run(reveal_op)
    print(xcc)
    print("{} elapsed: {}".format(protocol_name, end_t - start_t))
    rst.deactivate()
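# A hedged usage sketch; np_a and np_b are defined elsewhere in this file,
# so the shapes and values below are illustrative assumptions only.
import numpy as np
np_a = np.full((2, 2), 0.5)   # logits (assumed shape and values)
np_b = np.full((2, 2), 1.0)   # labels (assumed shape and values)
test_protocol("SecureNN")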
Y1 = tf.Variable([[7.0, 8.0]], name="y1")
Z1 = tf.matmul(X, Y1, transpose_b=True)
Y2 = tf.Variable([[7.0, 8.0, 9.0]], name="y2")
Z2 = tf.matmul(X, Y2, transpose_a=True, transpose_b=True)

try:
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    print(sess.run(Z))
    print(sess.run(Z1))
    print(sess.run(Z2))
    print("Pass")
except Exception:
    print("Fail")

# try:
#     train_step = tf.train.GradientDescentOptimizer(0.01).minimize(Z)
#     print("Pass")
# except Exception:
#     print("Fail")

Writer = tf.summary.FileWriter("log/matmul2", tf.get_default_graph())
Writer.close()
rst.deactivate()
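# Shape check for the transposed matmuls above (a plain-NumPy sketch; the
# 3x2 shape of X is inferred from the transpose requirements, not stated in
# this fragment):
#   Z1 = X   @ Y1^T  -> [3,2] x [2,1] = [3,1]
#   Z2 = X^T @ Y2^T  -> [2,3] x [3,1] = [2,1]
import numpy as np
X_demo = np.arange(6, dtype=np.float64).reshape(3, 2)
Y1_demo = np.array([[7.0, 8.0]])         # shape [1, 2]
Y2_demo = np.array([[7.0, 8.0, 9.0]])    # shape [1, 3]
print(X_demo @ Y1_demo.T)                # shape [3, 1]
print(X_demo.T @ Y2_demo.T)              # shape [2, 1]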
delta_b = db * learning_rate
update_w = W - delta_w
update_b = b - delta_b

# update variables
assign_update_w = tf.assign(W, update_w)
assign_update_b = tf.assign(b, update_b)

# training
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # init variables & iterators
    sess.run(init)
    sess.run([iter_x0.initializer, iter_x1.initializer, iter_y.initializer])

    # train
    start_time = time.time()
    BATCHES = int(ROW_NUM / BATCH_SIZE)
    for e in range(EPOCHES):
        for i in range(BATCHES):
            sess.run([assign_update_w, assign_update_b])
    training_use_time = time.time() - start_time
    print("training_use_time: {} seconds".format(training_use_time))

print(rtt.get_perf_stats(True))
rtt.deactivate()
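# How the manual SGD step above is typically derived (a hedged NumPy sketch;
# dw and db are computed earlier in this file, presumably as the analytic
# mean-squared-error gradients for linear regression):
import numpy as np
B, D, lr = 4, 3, 0.01
rng = np.random.default_rng(0)
Xb, Yb = rng.normal(size=(B, D)), rng.normal(size=(B, 1))
W_np, b_np = np.zeros((D, 1)), np.zeros(1)
pred = Xb @ W_np + b_np
dw = (2.0 / B) * Xb.T @ (pred - Yb)   # d(mean (pred - Y)^2) / dW
db = (2.0 / B) * np.sum(pred - Yb)    # d(mean (pred - Y)^2) / db
W_np -= lr * dw                       # mirrors assign_update_w above
b_np -= lr * db                       # mirrors assign_update_b above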
import tensorflow as tf
import latticex.rosetta as cb

x = tf.Variable([3.0], name='x')
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(x)

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    try:
        sess.run(optimizer)
        print("Pass")
    except Exception:
        print("Fail")

# writer = tf.summary.FileWriter("log", tf.get_default_graph())
# writer.close()

cb.deactivate()
b = tf.Variable(tf.zeros([1], dtype=tf.float64), name='b')
print(W)
print(b)

# predict
pred_Y = tf.sigmoid(tf.matmul(X, W) + b)
print(pred_Y)

# saver for checkpoint restore
saver = tf.train.Saver(var_list=None, max_to_keep=5, name='v2')
os.makedirs("./log/ckpt_" + node_id, exist_ok=True)

# init
init = tf.global_variables_initializer()
reveal_Y = rtt.SecureReveal(pred_Y)

with tf.Session(task_id=task_id) as sess:
    sess.run(init)
    if os.path.exists('./log/ckpt_' + node_id + '/checkpoint'):
        saver.restore(sess, './log/ckpt_' + node_id + '/model')

    # predict
    Y_pred = sess.run(pred_Y, feed_dict={X: real_X})
    print("Y_pred:", Y_pred)
    reveal_y = sess.run(reveal_Y, feed_dict={X: real_X})
    print("reveal_Y:", reveal_y)

    print(rtt.get_perf_stats(True, task_id=task_id))

rtt.deactivate(task_id=task_id)
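# The restore above expects a checkpoint written by an earlier run,
# presumably the matching training script. A minimal self-contained sketch
# of the save side in plain TensorFlow 1.x (demo names and paths are
# assumptions, not the original script's):
import os
import tensorflow as tf

demo_b = tf.Variable(tf.zeros([1], dtype=tf.float64), name='b_demo')
demo_saver = tf.train.Saver(var_list=[demo_b])
os.makedirs("./log/ckpt_demo", exist_ok=True)
with tf.Session() as demo_sess:
    demo_sess.run(tf.global_variables_initializer())
    demo_saver.save(demo_sess, './log/ckpt_demo/model')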
import tensorflow as tf
import latticex.rosetta as rrt

X = tf.truncated_normal([1], dtype=tf.float64)
Y = tf.Variable(2.0, name='y')
Z = Y * X

try:
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(Z)
    print("Pass")
except Exception:
    print("Fail")

Writer = tf.summary.FileWriter("log/truncated_normal", tf.get_default_graph())
Writer.close()
rrt.deactivate()
def test(task_id):
    rtt.py_protocol_handler.set_loglevel(0)
    np.set_printoptions(suppress=True)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    np.random.seed(0)

    EPOCHES = 10
    BATCH_SIZE = 16
    learning_rate = 0.0002

    rtt.activate("SecureNN", task_id=task_id)
    node_id = rtt.get_current_node_id(task_id=task_id)

    dg = tf.Graph()
    with dg.as_default():
        # real data
        # ######################################## difference from tensorflow
        file_x = '../dsets/' + node_id + "/reg_train_x.csv"
        file_y = '../dsets/' + node_id + "/reg_train_y.csv"
        real_X, real_Y = rtt.PrivateDataset(
            data_owner=(0, 1), label_owner=1,
            task_id=task_id).load_data(file_x, file_y, header=None)
        # ######################################## difference from tensorflow
        DIM_NUM = real_X.shape[1]

        X = tf.placeholder(tf.float64, [None, DIM_NUM])
        Y = tf.placeholder(tf.float64, [None, 1])
        print(X)
        print(Y)

        # initialize W & b
        W = tf.Variable(tf.zeros([DIM_NUM, 1], dtype=tf.float64))
        b = tf.Variable(tf.zeros([1], dtype=tf.float64))
        print(W)
        print(b)

        # predict
        pred_Y = tf.matmul(X, W) + b
        print(pred_Y)

        # loss
        loss = tf.square(Y - pred_Y)
        loss = tf.reduce_mean(loss)
        print(loss)

        # optimizer
        train = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
        print(train)

        init = tf.global_variables_initializer()
        print(init)

        # ########### for test, reveal
        reveal_W = rtt.SecureReveal(W)
        reveal_b = rtt.SecureReveal(b)
        reveal_Y = rtt.SecureReveal(pred_Y)
        # ########### for test, reveal

        config = tf.ConfigProto(inter_op_parallelism_threads=16,
                                intra_op_parallelism_threads=16)
        with tf.Session(task_id=task_id, config=config) as sess:
            sess.run(init)
            #rW, rb = sess.run([reveal_W, reveal_b])
            #print("init weight:{} \nbias:{}".format(rW, rb))

            # train
            BATCHES = math.ceil(len(real_X) / BATCH_SIZE)
            for e in range(EPOCHES):
                for i in range(BATCHES):
                    bX = real_X[(i * BATCH_SIZE):(i + 1) * BATCH_SIZE]
                    bY = real_Y[(i * BATCH_SIZE):(i + 1) * BATCH_SIZE]
                    print('*' * 80, task_id)
                    sess.run(train, feed_dict={X: bX, Y: bY})
                    print('#' * 80, task_id)
                    j = e * BATCHES + i
                    if j % 50 == 0 or (j == EPOCHES * BATCHES - 1 and j % 50 != 0):
                        pass
                        #rW, rb = sess.run([reveal_W, reveal_b])
                        #print("I,E,B:{:0>4d},{:0>4d},{:0>4d} weight:{} \nbias:{}".format(
                        #    j, e, i, rW, rb))

            # predict
            #Y_pred = sess.run(reveal_Y, feed_dict={X: real_X, Y: real_Y})
            #print("Y_pred:", Y_pred)

            print(rtt.get_perf_stats(pretty=True, task_id=task_id))

    rtt.deactivate(task_id=task_id)
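# A hedged driver sketch for the task-scoped training test above; the task
# id strings and one-thread-per-task layout are illustrative assumptions,
# not part of the original file.
import threading

if __name__ == "__main__":
    workers = [threading.Thread(target=test, args=(tid,))
               for tid in ("lr-task-0", "lr-task-1")]
    for t in workers:
        t.start()
    for t in workers:
        t.join()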