# --- Linear-model graph construction and training loop (MPC, TF v1 graph mode).
# NOTE(review): X, Y, W, b, learning_rate and EPOCHES are defined outside this
# fragment — presumably placeholders/variables of a linear regression; confirm.

# predicted output of the linear model: X*W + b
pred_Y = tf.matmul(X, W) + b
print(pred_Y)

# loss
# mean-squared-error between labels Y and predictions
loss = tf.reduce_mean(tf.square(Y - pred_Y))
print(loss)

# optimizer
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
print(train)

init = tf.global_variables_initializer()
print(init)

# ########### for test, reveal
# MpcReveal converts secret-shared tensors back to plaintext for debugging only.
reveal_W = rtt.MpcReveal(W)
reveal_b = rtt.MpcReveal(b)
reveal_Y = rtt.MpcReveal(pred_Y)
# ########### for test, reveal

with tf.Session() as sess:
    sess.run(init)
    #xW, xb = sess.run([W, b])
    #print("init weight:{} \nbias:{}".format(xW, xb))
    # reveal and print the initial (plaintext) parameter values
    rW, rb = sess.run([reveal_W, reveal_b])
    print("init weight:{} \nbias:{}".format(rW, rb))
    # training loop; no feed_dict, so inputs presumably come from
    # constants/datasets wired into the graph elsewhere — confirm.
    for i in range(EPOCHES):
        sess.run(train)
import latticex.rosetta as rtt
import tensorflow as tf

# Millionaires'-style private comparison: party 0 and party 1 each feed a
# private value; the MPC backend evaluates "greater than" on secret shares,
# and the plaintext answer is only recovered via MpcReveal at the end.
alice_wealth = tf.Variable(rtt.private_input(0, 2000001))
bob_wealth = tf.Variable(rtt.private_input(1, 2000000))
comparison = tf.greater(alice_wealth, bob_wealth)

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    # run the comparison on shares, then reveal the shared result as plaintext
    shared_result = session.run(comparison)
    print('ret:', session.run(rtt.MpcReveal(shared_result)))  # ret: 1.0
#print("update W:", mpc_sess.run(rstt.MpcReveal(W))) #print("update b:", mpc_sess.run(rstt.MpcReveal(b))) #if (epoch + 1) % DIS_STEP == 0: #type(avg_loss) #print("****Epoch: ", epoch, "\n ****current loss:") #print(avg_loss) # print("****Epoch: ", epoch, "\n ****current model:") # print("W:", mpc_sess.run(rstt.MpcReveal(W))) # print("b:", mpc_sess.run(rstt.MpcReveal(b))) end_t = time.time() print("The training cost: ", end_t - start_t, "seconds") print("************ MPC Trainning Finished! **************") print("trained-param W: ", mpc_sess.run(rstt.MpcReveal(W)).tolist()) print("trained-param b: ", mpc_sess.run(rstt.MpcReveal(b)).tolist()) print("************ MPC Fitting on TRAIN dataset **************") real_label = rstt.MpcReveal(Y) pred_reveal = rstt.MpcReveal(pred) pred_o, real_o = mpc_sess.run([pred_reveal, real_label], feed_dict={ X: shared_attr_ds, Y: shared_label_ds }) true_count = int(0) total_count = int(len(pred_o)) for i in range(len(pred_o)): if round(pred_o[i][0]) == round(real_o[i][0]):
_, curr_loss = mpc_sess.run([optimizer, loss], feed_dict={ X: batch_attr, Y: batch_label }) #avg_loss += curr_loss / total_batch #print("AFT curr_W:", mpc_sess.run(W)) #print("AFT curr_b:", mpc_sess.run(b)) if (epoch + 1) % DIS_STEP == 0: #type(avg_loss) #print("****Epoch: ", epoch, "\n ****current loss:") #print(avg_loss) print("****Epoch:", epoch + 1) print("curr_W:\n", mpc_sess.run(rstt.MpcReveal(W))) print("curr_b:\n", mpc_sess.run(rstt.MpcReveal(b))) end_t = time.time() print("************ MPC Trainning Finished! **************") print("The training cost: ", end_t - start_t, "seconds") print("W:", mpc_sess.run(rstt.MpcReveal(W))) print("b:", mpc_sess.run(rstt.MpcReveal(b))) print("************ MPC Fitting on TRAIN dataset **************") self_check_pred = rstt.MpcReveal(tf.sigmoid(tf.matmul(X, W) + b)) # self_check_pred =rstt.MpcReveal(rstt.MpcSigmoid(rstt.MpcAdd(rstt.MpcMatMul(X, W), b))) real_label = rstt.MpcReveal(Y) pred_o, real_o = mpc_sess.run([self_check_pred, real_label], feed_dict={ X: shared_attr_ds,
print(Y) # initialize W & b W = tf.Variable(tf.ones([DIM_NUM, 1], dtype=tf.float64)) b = tf.Variable(tf.ones([1], dtype=tf.float64)) #w0 = np.ones([DIM_NUM, 1]) #b0 = np.ones([1]) #W = tf.Variable(rtt.private_input(2, w0)) #b = tf.Variable(rtt.private_input(2, b0)) print(W) print(b) # predict pred_Y = tf.matmul(X, W) + b print(pred_Y) XR = rtt.MpcReveal(X) YR = rtt.MpcReveal(Y) RR = rtt.MpcReveal(pred_Y) init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) xr = sess.run(RR, feed_dict={X: real_X, Y: real_Y}) print(xr) xr = sess.run(XR, feed_dict={X: real_X}) print(xr) xr = sess.run(YR, feed_dict={Y: real_Y}) print(xr) exit(0)
# rstt.MpcReveal(tf.subtract(ONE, Y)), # rstt.MpcReveal((ONE - Y) * tf.log(1 - sig_v)) # ], # feed_dict={X:batch_attr, Y: batch_label}) # if i <= 2: # print("logits:", logit_pv, " \n sig:", sig_pv1, "and \n", sig_pv2) # print("logv1:", log_v1, "\n logv2:", log_v2, "\n ty:", ty, "\n loss:", mpc_sess.run(rstt.MpcReveal(curr_loss))) #avg_loss += curr_loss / total_batch #print("AFT curr_W:", mpc_sess.run(W)) #print("AFT curr_b:", mpc_sess.run(b)) if (epoch + 1) % DIS_STEP == 0: #type(avg_loss) #print("****Epoch: ", epoch, "\n ****current loss:") #print(avg_loss) cW, cb = mpc_sess.run([rstt.MpcReveal(W), rstt.MpcReveal(b)]) #print("i:{:0>4d} weight:{} \nbias:{}".format(epoch+1, cW, cb)) if args.party_id == 0: savecsv("{}-{:0>4d}-{}.csv".format(csvprefix, epoch+1, "W"), cW) savecsv("{}-{:0>4d}-{}.csv".format(csvprefix, epoch+1, "b"), cb) print("****Epoch:", epoch+1) print("curr_W:\n", cW) print("curr_b:\n", cb) end_t = time.time() print("************ MPC Trainning Finished! **************") print("The training cost: ", end_t - start_t, "seconds") print("W:", mpc_sess.run(rstt.MpcReveal(W))) print("b:", mpc_sess.run(rstt.MpcReveal(b)))