import subprocess
import time

import numpy as np
import tensorflow as tf


def get_predict_labels(model_file, input_node, output_node, input_data):
    # Load saved model
    tf.import_graph_def(load_pb_file(model_file))
    print(f"predict labels - loaded model from file: {model_file}")
    print_nodes()

    # Get input / output tensors
    x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(input_node)
    y_output = tf.compat.v1.get_default_graph().get_tensor_by_name(output_node)

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        predicted_labels = sess.run(y_output, feed_dict={x_input: input_data})

    # Note: argmax over the flattened output; assumes a single example per call.
    return np.argmax(predicted_labels)
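
# NOTE: load_pb_file() and print_nodes() are referenced throughout this file
# but defined elsewhere in the repository. The definitions below are a minimal
# sketch of what they plausibly look like, assuming the model is stored as a
# frozen TensorFlow GraphDef (.pb); they are illustrative, not the
# repository's actual implementations.


def load_pb_file(filename):
    """Read a frozen GraphDef from a .pb file (sketch)."""
    with tf.io.gfile.GFile(filename, "rb") as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())
    return graph_def


def print_nodes():
    """Print the names of all nodes in the default graph (sketch)."""
    for node in tf.compat.v1.get_default_graph().as_graph_def().node:
        print(node.name)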
def test_network():
    batch_size = 4000

    # Load MNIST data (for the test set)
    (x_train, y_train, x_test, y_test) = load_mnist_data(start_batch=0,
                                                         batch_size=batch_size)

    # Load saved model
    tf.import_graph_def(load_pb_file("models/cryptonets.pb"))
    print("loaded model")
    print_nodes()

    # Get input / output tensors
    x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
        "import/input:0")
    y_output = tf.compat.v1.get_default_graph().get_tensor_by_name(
        "import/output/BiasAdd:0")

    # Create configuration to encrypt input
    config = server_config(x_input.name)

    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        start_time = time.time()
        t0 = time.perf_counter()
        y_hat = y_output.eval(feed_dict={x_input: x_test})
        t1 = time.perf_counter()
        cur_times['t_computation'] = delta_ms(t0, t1)
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))

    y_test_label = np.argmax(y_test, 1)
    if batch_size < 60:
        print("y_hat", np.round(y_hat, 2))
    y_pred = np.argmax(y_hat, 1)
    correct_prediction = np.equal(y_pred, y_test_label)
    error_count = np.size(correct_prediction) - np.sum(correct_prediction)
    test_accuracy = np.mean(correct_prediction)
    print("Error count", error_count, "of", batch_size, "elements.")
    print("Accuracy: %g " % test_accuracy)
    cur_times['test_accuracy'] = test_accuracy
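
# NOTE: cur_times and delta_ms() are used above for timing bookkeeping but are
# not defined in this file. A minimal sketch, assuming delta_ms converts a
# pair of time.perf_counter() readings (in seconds) to milliseconds:


cur_times = {}


def delta_ms(t0, t1):
    """Elapsed time between two perf_counter() readings, in ms (sketch)."""
    return 1000.0 * (t1 - t0)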
def test_network(FLAGS):
    (x_train, y_train, x_test, y_test) = load_mnist_data(FLAGS.start_batch,
                                                         FLAGS.batch_size)

    # Load saved model
    tf.import_graph_def(load_pb_file(FLAGS.model_file))
    print("loaded model")
    print_nodes()

    # Get input / output tensors
    x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
        FLAGS.input_node)
    y_output = tf.compat.v1.get_default_graph().get_tensor_by_name(
        FLAGS.output_node)

    # Create configuration to encrypt input
    config = server_config_from_flags(FLAGS, x_input.name)

    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        start_time = time.time()
        y_hat = y_output.eval(feed_dict={x_input: x_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))

    if not FLAGS.enable_client:
        y_test_label = np.argmax(y_test, 1)
        if FLAGS.batch_size < 60:
            print("y_hat", np.round(y_hat, 2))
        y_pred = np.argmax(y_hat, 1)
        correct_prediction = np.equal(y_pred, y_test_label)
        error_count = np.size(correct_prediction) - np.sum(correct_prediction)
        test_accuracy = np.mean(correct_prediction)
        print("Error count", error_count, "of", FLAGS.batch_size, "elements.")
        print("Accuracy: %g " % test_accuracy)
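
# NOTE: load_mnist_data() is used by the test and server functions in this
# file but defined elsewhere. A rough sketch, assuming MNIST is fetched via
# tf.keras.datasets, normalized to [0, 1], one-hot encoded, and the test set
# sliced starting at start_batch; the repository's loader may differ in
# detail (e.g., input layout expected by the frozen graph).


def load_mnist_data(start_batch, batch_size):
    """Load MNIST and slice the test set (sketch; assumptions as noted above)."""
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_train = x_train.reshape(-1, 28, 28, 1).astype(np.float32) / 255.0
    x_test = x_test.reshape(-1, 28, 28, 1).astype(np.float32) / 255.0
    y_train = np.eye(10)[y_train]
    y_test = np.eye(10)[y_test]
    x_test = x_test[start_batch:start_batch + batch_size]
    y_test = y_test[start_batch:start_batch + batch_size]
    return (x_train, y_train, x_test, y_test)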
def run_server(FLAGS, query):
    # Load saved model. Alternative model paths (commented out in the
    # original): FLAGS.model_file, f"/home/dockuser/models/{FLAGS.port}.pb"
    tf.import_graph_def(
        load_pb_file("/home/dockuser/models/cryptonets-relu.pb"))
    print("loaded model")
    print_nodes()
    print(f"query: {query.shape}")

    # Get input / output tensors. Alternative node names:
    # FLAGS.input_node / "import/Placeholder:0" for the input,
    # FLAGS.output_node / "import/dense/BiasAdd:0" for the output.
    x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
        "import/input:0")
    y_output = tf.compat.v1.get_default_graph().get_tensor_by_name(
        "import/output/BiasAdd:0")

    # (Commented-out variants instead built the network in-process with
    # models.Local_Model on a (None, 3, 32, 32) placeholder, loaded weights
    # via model.initialize_weights(FLAGS.model_file), and fetched
    # "import/conv2d_1/kernel:0" to inspect the weights.)

    # Other output transforms that were tried here:
    # y_max = tf.nn.softmax(y_output)
    # y_max = y_output
    # y_max = approximate_softmax(x=y_output, nr_terms=2)
    # y_max = max_pool(y_output=y_output, FLAGS=FLAGS)

    # r - r*: subtract the random mask r* from the logits.
    print('r_star: ', FLAGS.r_star)
    r_star = np.array(FLAGS.r_star)
    y_max = tf.subtract(y_output,
                        tf.convert_to_tensor(r_star, dtype=tf.float32))

    # Create configuration to encrypt input
    config = server_config_from_flags(FLAGS, x_input.name)

    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        start_time = time.time()
        print(f"query shape before processing: {query.shape}")

        inference_start = time.time()
        y_hat = sess.run(y_max, feed_dict={x_input: query})
        inference_end = time.time()
        print(f"Inference time: {inference_end - inference_start}s")
        with open(inference_no_network_times_name, 'a') as outfile:
            outfile.write(str(inference_end - inference_start))
            outfile.write('\n')

        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))

        party_id = int(FLAGS.port)
        msg = "doing 2pc"
        print(msg)
        log_timing(stage='server_client:' + msg,
                   log_file=FLAGS.log_timing_file)
        print('r_star (r*): ', array_str(r_star))
        r_star = round_array(x=r_star, exp=FLAGS.round_exp)
        print('rounded r_star (r*): ', array_str(r_star))

        if FLAGS.backend == 'HE_SEAL':
            argmax_time_start = time.time()
            # Write this party's share of r* to a file for the garbled-circuit
            # argmax. Assumes batch size 1 for now.
            # TODO: make this work for batch size > 1
            with open(f'{out_server_name}{FLAGS.port}.txt', 'w') as outfile:
                for val in r_star.flatten():
                    outfile.write(f"{int(val)}" + '\n')
            process = subprocess.Popen([
                './gc-emp-test/bin/argmax_1', '1', '12345',
                f'{out_server_name}{FLAGS.port}.txt',
                f'{out_final_name}{FLAGS.port}.txt'
            ])
            process.wait()
            argmax_time_end = time.time()
            with open(argmax_times_name, 'a') as outfile:
                outfile.write(str(argmax_time_end - argmax_time_start))
                outfile.write("\n")

        log_timing(stage='server_client:finished 2PC',
                   log_file=FLAGS.log_timing_file)
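
# NOTE: round_array(), array_str(), and log_timing() are used above but
# defined elsewhere in the repository, as are the module-level path constants
# (out_server_name, out_final_name, inference_no_network_times_name,
# argmax_times_name). The sketches below are assumptions: round_array follows
# the formula left in a comment in the second run_server variant
# ((x * 2 ** exp).astype(np.int64)); the other two are plausible stand-ins.


def round_array(x, exp):
    """Scale by 2**exp and truncate to int64 for the GC argmax (sketch)."""
    return (x * 2 ** exp).astype(np.int64)


def array_str(x):
    """Single-line string form of an array for logging (sketch)."""
    return np.array2string(np.asarray(x), precision=4, separator=', ')


def log_timing(stage, log_file):
    """Append '<stage>: <wall-clock time>' to log_file, if given (sketch)."""
    if log_file:
        with open(log_file, 'a') as f:
            f.write(f'{stage}: {time.time()}\n')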
def run_server(FLAGS):
    (x_train, y_train, x_test, y_test) = load_mnist_data(FLAGS.start_batch,
                                                         FLAGS.batch_size)

    # Load saved model
    tf.import_graph_def(load_pb_file(FLAGS.model_file))
    print("loaded model")
    print_nodes()

    # Get input / output tensors
    x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
        FLAGS.input_node)
    y_output = tf.compat.v1.get_default_graph().get_tensor_by_name(
        FLAGS.output_node)

    # Other output transforms that were tried here:
    # y_max = tf.nn.softmax(y_output)
    # y_max = y_output
    # y_max = approximate_softmax(x=y_output, nr_terms=2)
    # y_max = max_pool(y_output=y_output, FLAGS=FLAGS)

    print('r_star: ', FLAGS.rstar)
    print('is r_star [-1.0]: ', FLAGS.rstar == [-1.0])
    r_star = None
    if FLAGS.rstar is None:
        # For debugging: skip the r* mask entirely.
        y_max = y_output
    else:
        if FLAGS.rstar == [-1.0]:
            # Generate a random r*
            if y_test is not None:
                batch_size, num_classes = y_test.shape
            else:
                batch_size, num_classes = FLAGS.batch_size, FLAGS.num_classes
            r_star = get_rstar_server(
                max_logit=FLAGS.max_logit,
                batch_size=batch_size,
                num_classes=num_classes,
                exp=FLAGS.rstar_exp,
            )
        else:
            r_star = np.array(FLAGS.rstar)

        # r - r*: subtract the random mask r* from the logits.
        y_max = tf.subtract(y_output,
                            tf.convert_to_tensor(r_star, dtype=tf.float32))

        if FLAGS.debug is True:
            print('y_max shape: ', y_max.shape)
            print('r_star shape: ', r_star.shape)
            # Piggyback r* on the output so it can be recovered client-side.
            y_max = tf.concat([y_max, r_star], axis=0)

    # Create configuration to encrypt input
    config = server_config_from_flags(FLAGS, x_input.name)

    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        start_time = time.time()
        y_hat = sess.run(y_max, feed_dict={x_input: x_test})
        print('y_hat: ', y_hat)
        if FLAGS.debug is True:
            # The masked logits and r* were concatenated along the batch
            # axis; split them back apart.
            r_star = y_hat[FLAGS.batch_size:]
            y_hat = y_hat[:FLAGS.batch_size]
        print("logits (y_hat): ", array_str(y_hat))
        print("logits (y_hat) type: ", type(y_hat))
        print("logits (y_hat) shape: ", y_hat.shape)
        y_pred = y_hat.argmax(axis=1)
        print("y_pred: ", y_pred)
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))

        party_id = int(FLAGS.port)
        if r_star is not None:
            print("doing 2pc")
            print('r_star (r*): ', array_str(r_star))
            if FLAGS.round_exp is not None:
                # r_star = (r_star * 2 ** FLAGS.round_exp).astype(np.int64)
                r_star = round_array(x=r_star, exp=FLAGS.round_exp)
                print('rounded r_star (r*): ', array_str(r_star))
            # Write this party's share of r* to a file for the garbled-circuit
            # argmax. Assumes batch size 1 for now.
            # TODO: make this work for batch size > 1
            with open(f'{out_server_name}{party_id}.txt', 'w') as outfile:
                for val in r_star.flatten():
                    outfile.write(f"{int(val)}" + '\n')
            time.sleep(1)
            process = subprocess.Popen([
                './gc-emp-test/bin/argmax_1', '1', '12345',
                f'{out_server_name}{party_id}.txt',
                f'{out_final_name}{party_id}.txt'
            ])
            process.wait()
        else:
            print('r_star is None in he_server.py!')

    if not FLAGS.enable_client:
        y_test_label = np.argmax(y_test, 1)
        if FLAGS.batch_size < 60:
            print("y_hat", np.round(y_hat, 2))
        y_pred = np.argmax(y_hat, 1)
        correct_prediction = np.equal(y_pred, y_test_label)
        error_count = np.size(correct_prediction) - np.sum(correct_prediction)
        test_accuracy = np.mean(correct_prediction)
        print("Error count", error_count, "of", FLAGS.batch_size, "elements.")
        print("Accuracy: %g " % test_accuracy)
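
# NOTE: get_rstar_server() is called above but defined elsewhere. A minimal
# sketch consistent with its call site, assuming r* is drawn uniformly from
# [-max_logit, max_logit) with one row per example and scaled by 2**exp; the
# repository's actual sampling scheme may differ.


def get_rstar_server(max_logit, batch_size, num_classes, exp):
    """Draw a random additive mask r* for the logits (sketch)."""
    r_star = np.random.uniform(low=-max_logit, high=max_logit,
                               size=(batch_size, num_classes))
    return r_star * 2 ** exp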