Example #1
def test_cryptonets_relu(FLAGS):
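    """Evaluate the squashed CryptoNets-ReLU model on an MNIST test batch.

    Builds the model on a placeholder input, runs inference under the HE
    server configuration derived from FLAGS, and, when no separate client
    is enabled, reports the error count and accuracy.
    """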
    (x_train, y_train, x_test, y_test) = load_mnist_data()

    x = tf.compat.v1.placeholder(tf.float32, [None, 28, 28, 1], name='input')
    y_ = tf.compat.v1.placeholder(tf.float32, [None, 10])

    # Create the model
    y_conv = cryptonets_relu_test_squashed(x)

    config = server_config_from_flags(FLAGS, x.name)
    print('config', config)

    with tf.compat.v1.Session(config=config) as sess:
        x_test = x_test[:FLAGS.batch_size]
        y_test = y_test[:FLAGS.batch_size]
        start_time = time.time()
        y_conv_val = y_conv.eval(feed_dict={x: x_test, y_: y_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))

    if not FLAGS.enable_client:
        y_test_batch = y_test[:FLAGS.batch_size]
        y_label_batch = np.argmax(y_test_batch, 1)

        y_pred = np.argmax(y_conv_val, 1)
        correct_prediction = np.equal(y_pred, y_label_batch)
        error_count = np.size(correct_prediction) - np.sum(correct_prediction)
        test_accuracy = np.mean(correct_prediction)

        print('Error count', error_count, 'of', FLAGS.batch_size, 'elements.')
        print('Accuracy: %g ' % test_accuracy)
Example #2
def test_network(FLAGS):
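    """Run encrypted inference with a saved (frozen) model on MNIST.

    Imports the .pb graph given by FLAGS.model_file, looks up the input and
    output tensors by name, evaluates the test batch under the HE server
    configuration, and reports timing and server-side accuracy.
    """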
    (x_train, y_train, x_test,
     y_test) = load_mnist_data(FLAGS.start_batch, FLAGS.batch_size)

    # Load saved model
    tf.import_graph_def(load_pb_file(FLAGS.model_file))

    print("loaded model")
    print_nodes()

    # Get input / output tensors
    x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
        FLAGS.input_node)
    y_output = tf.compat.v1.get_default_graph().get_tensor_by_name(
        FLAGS.output_node)

    # Re-parse command-line arguments (this overrides the FLAGS passed in)
    # and create the configuration to encrypt the input
    FLAGS, unparsed = server_argument_parser().parse_known_args()
    config = server_config_from_flags(FLAGS, x_input.name)
    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        start_time = time.time()
        y_hat = y_output.eval(feed_dict={x_input: x_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))

    if not FLAGS.enable_client:
        y_test_label = np.argmax(y_test, 1)

        if FLAGS.batch_size < 60:
            print("y_hat", np.round(y_hat, 2))

        y_pred = np.argmax(y_hat, 1)
        correct_prediction = np.equal(y_pred, y_test_label)
        error_count = np.size(correct_prediction) - np.sum(correct_prediction)
        test_accuracy = np.mean(correct_prediction)

        print("Error count", error_count, "of", FLAGS.batch_size, "elements.")
        print("Accuracy: %g " % test_accuracy)
Example #3
def test_mnist_mlp(FLAGS):
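    """Evaluate a saved MNIST MLP model ('./model/model.pb') on one test batch.

    Imports the frozen graph, fetches the 'import/input:0' and
    'import/output:0' tensors, runs inference under the HE server
    configuration, and reports the error count and accuracy.
    """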
    (x_train, y_train, x_test, y_test) = load_mnist_data()
    x_test = x_test[:FLAGS.batch_size]
    y_test = y_test[:FLAGS.batch_size]

    graph_def = load_pb_file('./model/model.pb')

    with tf.Graph().as_default():
        tf.import_graph_def(graph_def)
        y_conv = tf.compat.v1.get_default_graph().get_tensor_by_name(
            "import/output:0")
        x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
            "import/input:0")

        config = server_config_from_flags(FLAGS, x_input.name)

        print('config', config)

        with tf.compat.v1.Session(config=config) as sess:
            start_time = time.time()
            y_conv_val = y_conv.eval(
                session=sess, feed_dict={
                    x_input: x_test,
                })
            elapsed_time = time.time() - start_time
            print("total time(s)", np.round(elapsed_time, 3))

    if not FLAGS.enable_client:
        y_test_batch = y_test[:FLAGS.batch_size]
        y_label_batch = np.argmax(y_test_batch, 1)

        y_pred = np.argmax(y_conv_val, 1)
        print('y_pred', y_pred)
        correct_prediction = np.equal(y_pred, y_label_batch)
        error_count = np.size(correct_prediction) - np.sum(correct_prediction)
        test_accuracy = np.mean(correct_prediction)

        print('Error count', error_count, 'of', FLAGS.batch_size, 'elements.')
        print('Accuracy: %g ' % test_accuracy)
Example #4
def run_server(FLAGS, query):
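    """Serve one encrypted query with a CryptoNets-ReLU model.

    Imports the frozen model, masks the logits by subtracting the random
    vector r* (FLAGS.r_star), runs inference on the client's query under the
    HE server configuration, writes the rounded r* values to a file, and
    launches the two-party (2PC) argmax binary as a subprocess.
    """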
    # tf.import_graph_def(load_pb_file(FLAGS.model_file))
    tf.import_graph_def(
        load_pb_file("/home/dockuser/models/cryptonets-relu.pb"))
    # tf.import_graph_def(load_pb_file(f"/home/dockuser/models/{FLAGS.port}.pb"))
    print("loaded model")
    print_nodes()
    print(f"query: {query.shape}")

    # Get input / output tensors
    x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
        # FLAGS.input_node
        # "import/Placeholder:0"
        "import/input:0")
    y_output = tf.compat.v1.get_default_graph().get_tensor_by_name(
        "import/output/BiasAdd:0"
        # FLAGS.output_node
        # "import/dense/BiasAdd:0"
    )

    # weight_check = tf.compat.v1.get_default_graph().get_tensor_by_name("import/conv2d_1/kernel:0")
    # print(weight_check)

    # Load saved model
    #
    # model = models.Local_Model(FLAGS.num_classes, FLAGS.dataset_name)  # load the model object
    # input_x = tf.compat.v1.placeholder(  # define input
    #     shape=(
    #         None,
    #         3,
    #         32,
    #         32,
    #     ), name="input", dtype=tf.float32)
    # init = tf.compat.v1.global_variables_initializer()
    # model.build((None, 3, 32, 32))
    # model.compile('adam', tf.keras.losses.CategoricalCrossentropy())

    # output_y = model.get_out(input_x)  # input through layers
    # print("loaded model")
    # print_nodes()
    # print(input_x)
    # print(output_y)
    # Get input / output tensors
    # x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
    #     FLAGS.input_node)
    # y_output = tf.compat.v1.get_default_graph().get_tensor_by_name(
    #     FLAGS.output_node)
    # y_max = tf.nn.softmax(y_output)
    # y_max = y_output
    # y_max = approximate_softmax(x=y_output, nr_terms=2)
    # y_max = max_pool(y_output=y_output, FLAGS=FLAGS)
    print('r_star: ', FLAGS.r_star)
    r_star = np.array(FLAGS.r_star)
    # r - r* (subtract the random vector r* from logits)
    y_max = tf.subtract(y_output, tf.convert_to_tensor(r_star,
                                                       dtype=tf.float32))

    # Create configuration to encrypt input
    config = server_config_from_flags(FLAGS, x_input.name)
    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        # model.initialize_weights(FLAGS.model_file)
        start_time = time.time()
        print(f"query shape before processing: {query.shape}")
        inference_start = time.time()
        y_hat = sess.run(y_max, feed_dict={x_input: query})
        inference_end = time.time()
        print(f"Inference time: {inference_end - inference_start}s")
        with open(inference_no_network_times_name, 'a') as outfile:
            outfile.write(str(inference_end - inference_start))
            outfile.write('\n')
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))
        party_id = int(FLAGS.port)

        msg = "doing 2pc"
        print(msg)
        log_timing(stage='server_client:' + msg,
                   log_file=FLAGS.log_timing_file)
        print('r_star (r*): ', array_str(r_star))
        r_star = round_array(x=r_star, exp=FLAGS.round_exp)
        print('rounded r_star (r*): ', array_str(r_star))
        if FLAGS.backend == 'HE_SEAL':
            argmax_time_start = time.time()
            with open(f'{out_server_name}{FLAGS.port}.txt',
                      'w') as outfile:  # party id
                # assume batch size of 1 for now TODO: make this work for > 1 batch size
                for val in r_star.flatten():
                    outfile.write(f"{int(val)}" + '\n')
            process = subprocess.Popen([
                './gc-emp-test/bin/argmax_1', '1', '12345',
                f'{out_server_name}{FLAGS.port}.txt',
                f'{out_final_name}{FLAGS.port}.txt'
            ])
            # time.sleep(15)
            process.wait()
            argmax_time_end = time.time()
            with open(argmax_times_name, 'a') as outfile:
                outfile.write(str(argmax_time_end - argmax_time_start))
                outfile.write("\n")
        log_timing(stage='server_client:finished 2PC',
                   log_file=FLAGS.log_timing_file)
Example #5
def test_mnist_cnn(FLAGS):
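    """Compare three CryptoNets variants on the same MNIST test batch.

    Evaluates the squashed full-dataset model (cryptonets_test_squashed),
    the HE-averaged model (cryptonets_HE_avg), and the repeated-sampling
    model in mode 3 (cryptonets_test_squashed_mode), printing runtime,
    error count, and accuracy for each.
    """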
    (x_train, y_train, x_test, y_test) = load_mnist_data()

    x = tf.compat.v1.placeholder(tf.float32, [None, 28, 28, 1], name='input')
    y_ = tf.compat.v1.placeholder(tf.float32, [None, 10])

    # Create the model using the full dataset
    y_conv = cryptonets_test_squashed(x)

    config = server_config_from_flags(FLAGS, x.name)

    print('config', config)

    with tf.compat.v1.Session(config=config) as sess:
        x_test = x_test[:FLAGS.batch_size]
        y_test = y_test[:FLAGS.batch_size]
        start_time = time.time()
        y_conv_val = y_conv.eval(feed_dict={x: x_test, y_: y_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))
        print('y_conv_val', np.round(y_conv_val, 2))

    y_test_batch = y_test[:FLAGS.batch_size]
    y_label_batch = np.argmax(y_test_batch, 1)

    correct_prediction = np.equal(np.argmax(y_conv_val, 1), y_label_batch)
    error_count = np.size(correct_prediction) - np.sum(correct_prediction)
    test_accuracy = np.mean(correct_prediction)
    print('Using full dataset')
    print('Error count:', error_count, 'of', FLAGS.batch_size, 'elements.')
    print('Accuracy: ', test_accuracy)

    # Using HE averaging
    y_conv = cryptonets_HE_avg(x)
    config = server_config_from_flags(FLAGS, x.name)

    print('config', config)

    with tf.compat.v1.Session(config=config) as sess:
        x_test = x_test[:FLAGS.batch_size]
        y_test = y_test[:FLAGS.batch_size]
        start_time = time.time()
        y_conv_val = y_conv.eval(feed_dict={x: x_test, y_: y_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))
        print('y_conv_val', np.round(y_conv_val, 2))

    y_test_batch = y_test[:FLAGS.batch_size]
    y_label_batch = np.argmax(y_test_batch, 1)

    correct_prediction = np.equal(np.argmax(y_conv_val, 1), y_label_batch)
    error_count = np.size(correct_prediction) - np.sum(correct_prediction)
    test_accuracy = np.mean(correct_prediction)
    print('Using HE_averaging')
    print('Error count:', error_count, 'of', FLAGS.batch_size, 'elements.')
    print('Accuracy: ', test_accuracy)
    
    # Using repeated sampling, mode 3
    y_conv = cryptonets_test_squashed_mode(x, 3)
    config = server_config_from_flags(FLAGS, x.name)

    print('config', config)

    with tf.compat.v1.Session(config=config) as sess:
        x_test = x_test[:FLAGS.batch_size]
        y_test = y_test[:FLAGS.batch_size]
        start_time = time.time()
        y_conv_val = y_conv.eval(feed_dict={x: x_test, y_: y_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))
        print('y_conv_val', np.round(y_conv_val, 2))

    y_test_batch = y_test[:FLAGS.batch_size]
    y_label_batch = np.argmax(y_test_batch, 1)

    correct_prediction = np.equal(np.argmax(y_conv_val, 1), y_label_batch)
    error_count = np.size(correct_prediction) - np.sum(correct_prediction)
    test_accuracy = np.mean(correct_prediction)
    print('Using repeated sampling (average of 4 partitions)')
    print('Error count:', error_count, 'of', FLAGS.batch_size, 'elements.')
    print('Accuracy: ', test_accuracy)
Example #6
def run_server(FLAGS):
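    """Run the HE inference server on an MNIST test batch.

    Loads the saved model, optionally masks the logits by subtracting a
    random vector r* (generated or taken from FLAGS.rstar), runs encrypted
    inference, and, when r* is used, writes its rounded values to a file and
    launches the two-party (2PC) argmax binary. Reports accuracy when no
    separate client is enabled.
    """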
    (x_train, y_train, x_test,
     y_test) = load_mnist_data(FLAGS.start_batch, FLAGS.batch_size)

    # Load saved model
    tf.import_graph_def(load_pb_file(FLAGS.model_file))

    print("loaded model")
    print_nodes()

    # Get input / output tensors
    x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
        FLAGS.input_node)
    y_output = tf.compat.v1.get_default_graph().get_tensor_by_name(
        FLAGS.output_node)
    # y_max = tf.nn.softmax(y_output)
    # y_max = y_output
    # y_max = approximate_softmax(x=y_output, nr_terms=2)
    # y_max = max_pool(y_output=y_output, FLAGS=FLAGS)
    print('r_star: ', FLAGS.rstar)
    print('is r_star [-1.0]', FLAGS.rstar == [-1.0])
    r_star = None
    if FLAGS.rstar is None:
        # for debugging: we don't want any r*
        y_max = y_output
    else:
        if FLAGS.rstar == [-1.0]:
            # Generate random r_star
            if y_test is not None:
                r_shape = y_test.shape
                batch_size = r_shape[0]
                num_classes = r_shape[1]
            else:
                batch_size, num_classes = FLAGS.batch_size, FLAGS.num_classes
            r_star = get_rstar_server(
                max_logit=FLAGS.max_logit,
                batch_size=batch_size,
                num_classes=num_classes,
                exp=FLAGS.rstar_exp,
            )
        else:
            r_star = np.array(FLAGS.rstar)
        # r - r* (subtract the random vector r* from logits)
        y_max = tf.subtract(y_output,
                            tf.convert_to_tensor(r_star, dtype=tf.float32))

        if FLAGS.debug is True:
            print('y_max shape: ', y_max.shape)
            print('r_star shape: ', r_star.shape)
            y_max = tf.concat([y_max, r_star], axis=0)

    # Create configuration to encrypt input
    config = server_config_from_flags(FLAGS, x_input.name)
    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        start_time = time.time()
        y_hat = sess.run(y_max, feed_dict={x_input: x_test})
        # y_hat = y_max.eval(feed_dict={x_input: x_test})
        print('y_hat: ', y_hat)
        if FLAGS.debug is True:
            r_star = y_hat[FLAGS.batch_size:]
            y_hat = y_hat[:FLAGS.batch_size]
        print("logits (y_hat): ", array_str(y_hat))
        print("logits (y_hat) type: ", type(y_hat))
        print("logits (y_hat) shape: ", y_hat.shape)
        # print("change y_hat to one_hot encoding")
        y_pred = y_hat.argmax(axis=1)
        print("y_pred: ", y_pred)

        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))
        party_id = int(FLAGS.port)

        if r_star is not None:
            print("doing 2pc")
            print('r_star (r*): ', array_str(r_star))
            if FLAGS.round_exp is not None:
                # r_star = (r_star * 2 ** FLAGS.round_exp).astype(np.int64)
                r_star = round_array(x=r_star, exp=FLAGS.round_exp)
                print('rounded r_star (r*): ', array_str(r_star))
            with open(f'{out_server_name}{party_id}.txt',
                      'w') as outfile:  # party id
                # assume batch size of 1 for now TODO: make this work for > 1 batch size
                for val in r_star.flatten():
                    outfile.write(f"{int(val)}" + '\n')
            time.sleep(1)
            process = subprocess.Popen([
                './gc-emp-test/bin/argmax_1', '1', '12345',
                f'{out_server_name}{party_id}.txt',
                f'{out_final_name}{party_id}.txt'
            ])
            # time.sleep(15)
            process.wait()
        else:
            print('r_star is None in he_server.py!')

    if not FLAGS.enable_client:
        y_test_label = np.argmax(y_test, 1)

        if FLAGS.batch_size < 60:
            print("y_hat", np.round(y_hat, 2))

        y_pred = np.argmax(y_hat, 1)
        correct_prediction = np.equal(y_pred, y_test_label)
        error_count = np.size(correct_prediction) - np.sum(correct_prediction)
        test_accuracy = np.mean(correct_prediction)

        print("Error count", error_count, "of", FLAGS.batch_size, "elements.")
        print("Accuracy: %g " % test_accuracy)