Example #1
def test_network(FLAGS):
    (x_train, y_train, x_test,
     y_test) = load_mnist_data(FLAGS.start_batch, FLAGS.batch_size)
    data = x_test.flatten("C")

    client = pyhe_client.HESealClient(
        FLAGS.hostname,
        FLAGS.port,
        FLAGS.batch_size,
        {FLAGS.tensor_name: (FLAGS.encrypt_data_str, data)},
    )

    results = np.round(client.get_results(), 2)

    y_pred_reshape = np.array(results).reshape(FLAGS.batch_size, 10)
    with np.printoptions(precision=3, suppress=True):
        print(y_pred_reshape)

    y_pred = y_pred_reshape.argmax(axis=1)
    print("y_pred", y_pred)

    correct = np.sum(np.equal(y_pred, y_test.argmax(axis=1)))
    acc = correct / float(FLAGS.batch_size)
    print("correct", correct)
    print("Accuracy (batch size", FLAGS.batch_size, ") =", acc * 100.0, "%")
Example #2
def test_cryptonets_relu(FLAGS):
    (x_train, y_train, x_test, y_test) = load_mnist_data()

    x = tf.compat.v1.placeholder(tf.float32, [None, 28, 28, 1], name='input')
    y_ = tf.compat.v1.placeholder(tf.float32, [None, 10])

    # Create the model
    y_conv = cryptonets_relu_test_squashed(x)

    config = server_config_from_flags(FLAGS, x.name)
    print('config', config)

    with tf.compat.v1.Session(config=config) as sess:
        x_test = x_test[:FLAGS.batch_size]
        y_test = y_test[:FLAGS.batch_size]
        start_time = time.time()
        y_conv_val = y_conv.eval(feed_dict={x: x_test, y_: y_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))

    if not FLAGS.enable_client:
        y_test_batch = y_test[:FLAGS.batch_size]
        y_label_batch = np.argmax(y_test_batch, 1)

        y_pred = np.argmax(y_conv_val, 1)
        correct_prediction = np.equal(y_pred, y_label_batch)
        error_count = np.size(correct_prediction) - np.sum(correct_prediction)
        test_accuracy = np.mean(correct_prediction)

        print('Error count', error_count, 'of', FLAGS.batch_size, 'elements.')
        print('Accuracy: %g ' % test_accuracy)
Example #3
def test_mnist_cnn(FLAGS):
    (x_train, y_train, x_test, y_test) = load_mnist_data()

    x_test_batch = x_test[:FLAGS.batch_size]
    y_test_batch = y_test[:FLAGS.batch_size]

    data = x_test_batch.flatten('C')
    print('Client batch size from FLAG:', FLAGS.batch_size)

    port = 34000

    encrypt_str = 'encrypt' if FLAGS.encrypt_data else 'plain'
    client = pyhe_client.HESealClient(FLAGS.hostname, port, FLAGS.batch_size,
                                      {'input': (encrypt_str, data)})

    results = client.get_results()
    results = np.round(results, 2)

    y_pred_reshape = np.array(results).reshape(FLAGS.batch_size, 10)
    with np.printoptions(precision=3, suppress=True):
        print(y_pred_reshape)

    y_pred = y_pred_reshape.argmax(axis=1)
    print('y_pred', y_pred)
    y_true = y_test_batch.argmax(axis=1)

    correct = np.sum(np.equal(y_pred, y_true))
    acc = correct / float(FLAGS.batch_size)
    print('pred size', len(y_pred))
    print('correct', correct)
    print('Accuracy (batch size', FLAGS.batch_size, ') =', acc * 100., '%')
Example #4
def test_cryptonets_relu(FLAGS):
    (x_train, y_train, x_test, y_test) = load_mnist_data()

    x = tf.compat.v1.placeholder(tf.float32, [None, 28, 28, 1])
    y_ = tf.compat.v1.placeholder(tf.float32, [None, 10])

    # Create the model
    y_conv = cryptonets_relu_test_squashed(x)

    with tf.compat.v1.Session() as sess:
        x_test = x_test[:FLAGS.batch_size]
        y_test = y_test[:FLAGS.batch_size]
        start_time = time.time()
        y_conv_val = y_conv.eval(feed_dict={x: x_test, y_: y_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))

    using_client = (os.environ.get('NGRAPH_ENABLE_CLIENT') is not None)
    if not using_client:
        y_test_batch = y_test[:FLAGS.batch_size]
        y_label_batch = np.argmax(y_test_batch, 1)

        y_pred = np.argmax(y_conv_val, 1)
        print('y_pred', y_pred)
        correct_prediction = np.equal(y_pred, y_label_batch)
        error_count = np.size(correct_prediction) - np.sum(correct_prediction)
        test_accuracy = np.mean(correct_prediction)

        print('Error count', error_count, 'of', FLAGS.batch_size, 'elements.')
        print('Accuracy: %g ' % test_accuracy)
Example #5
def test_mnist_cnn(FLAGS):
    (x_train, y_train, x_test, y_test) = load_mnist_data()

    x = tf.compat.v1.placeholder(tf.float32, [None, 28, 28, 1])
    y_ = tf.compat.v1.placeholder(tf.float32, [None, 10])

    # Create the model
    y_conv = cryptonets_test_squashed(x)

    with tf.compat.v1.Session() as sess:
        x_test = x_test[:FLAGS.batch_size]
        y_test = y_test[:FLAGS.batch_size]
        start_time = time.time()
        y_conv_val = y_conv.eval(feed_dict={x: x_test, y_: y_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))

    y_test_batch = y_test[:FLAGS.batch_size]
    y_label_batch = np.argmax(y_test_batch, 1)

    correct_prediction = np.equal(np.argmax(y_conv_val, 1), y_label_batch)
    error_count = np.size(correct_prediction) - np.sum(correct_prediction)
    test_accuracy = np.mean(correct_prediction)

    print('Error count:', error_count, 'of', FLAGS.batch_size, 'elements.')
    print('Accuracy: ', test_accuracy)
Example #6
def main(FLAGS):
    (x_train, y_train, x_test, y_test) = load_mnist_data()

    x = tf.compat.v1.placeholder(tf.float32, [None, 28, 28, 1])
    y_ = tf.compat.v1.placeholder(tf.float32, [None, 10])
    y_conv = model.cryptonets_relu_model(x, 'train')

    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=y_, logits=y_conv)
    cross_entropy = tf.reduce_mean(cross_entropy)

    with tf.name_scope('adam_optimizer'):
        train_step = tf.compat.v1.train.AdamOptimizer(1e-4).minimize(
            cross_entropy)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        for i in range(FLAGS.train_loop_count):
            x_batch, y_batch = get_train_batch(i, FLAGS.batch_size, x_train,
                                               y_train)
            if i % 100 == 0:
                t = time.time()
                train_accuracy = accuracy.eval(feed_dict={
                    x: x_batch,
                    y_: y_batch
                })
                print('step %d, training accuracy %g, %g msec to evaluate' %
                      (i, train_accuracy, 1000 * (time.time() - t)))
            t = time.time()
            _, loss = sess.run([train_step, cross_entropy],
                               feed_dict={
                                   x: x_batch,
                                   y_: y_batch
                               })

            if i % 1000 == 999 or i == FLAGS.train_loop_count - 1:
                test_accuracy = accuracy.eval(feed_dict={
                    x: x_test,
                    y_: y_test
                })
                print('test accuracy %g' % test_accuracy)

        print("Training finished. Saving variables.")
        for var in tf.compat.v1.get_collection(
                tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES):
            weight = (sess.run([var]))[0].flatten().tolist()
            filename = (str(var).split())[1].replace('/', '_')
            filename = filename.replace("'", "").replace(':0', '') + '.txt'

            print("saving", filename)
            np.savetxt(str(filename), weight)

    squash_layers()
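The saving loop above flattens each trainable variable and writes it to a text file with np.savetxt. As an illustration only (not part of the original code), such a file can be read back with np.loadtxt and reshaped to the layer's original kernel shape; the filename and shape below are assumptions.

import numpy as np

# Hypothetical reload of a weight file written by the loop above.
flat = np.loadtxt("conv2d_kernel.txt")   # assumed filename
kernel = flat.reshape((5, 5, 1, 5))      # assumed original kernel shape
print("restored kernel shape:", kernel.shape)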
Example #7
def main(FLAGS):
    (x_train, y_train, x_test, y_test) = mnist_util.load_mnist_data()

    x = Input(shape=(
        28,
        28,
        1,
    ), name="input")

    y = model.cryptonets_model(x)
    cryptonets_model = Model(inputs=x, outputs=y)
    print(cryptonets_model.summary())

    def loss(labels, logits):
        return keras.losses.categorical_crossentropy(labels,
                                                     logits,
                                                     from_logits=True)

    optimizer = SGD(learning_rate=0.008, momentum=0.9)
    cryptonets_model.compile(optimizer=optimizer,
                             loss=loss,
                             metrics=["accuracy"])

    cryptonets_model.fit(x_train,
                         y_train,
                         epochs=FLAGS.epochs,
                         batch_size=FLAGS.batch_size,
                         validation_data=(x_test, y_test),
                         verbose=1)

    test_loss, test_acc = cryptonets_model.evaluate(x_test, y_test, verbose=1)
    print("Test accuracy:", test_acc)

    # Squash weights and save model
    weights = squash_layers(cryptonets_model,
                            tf.compat.v1.keras.backend.get_session())
    (conv1_weights, squashed_weights, fc1_weights, fc_1half_weights,
     fc2_weights) = weights[0:5]

    tf.compat.v1.reset_default_graph()
    sess = tf.compat.v1.Session()

    x = Input(shape=(
        28,
        28,
        1,
    ), name="input")
    y = model.cryptonets_model_squashed(x, conv1_weights, squashed_weights,
                                        fc_1half_weights, fc2_weights)
    sess.run(tf.compat.v1.global_variables_initializer())
    mnist_util.save_model(
        sess,
        ["output/BiasAdd"],
        "./models",
        "testnets_1",
    )
Example #8
def main(FLAGS):
    (x_train, y_train, x_test, y_test) = load_mnist_data()

    x = tf.compat.v1.placeholder(tf.float32, [None, 28, 28, 1], name='input')
    y_ = tf.compat.v1.placeholder(tf.float32, [None, 10])
    y_conv = model.mnist_mlp_model(x)

    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=y_, logits=y_conv)
    cross_entropy = tf.reduce_mean(cross_entropy)

    with tf.name_scope('adam_optimizer'):
        train_step = tf.compat.v1.train.AdamOptimizer(1e-4).minimize(
            cross_entropy)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        for i in range(FLAGS.train_loop_count):
            x_batch, y_batch = get_train_batch(i, FLAGS.batch_size, x_train,
                                               y_train)
            if i % 100 == 0:
                t = time.time()
                train_accuracy = accuracy.eval(feed_dict={
                    x: x_batch,
                    y_: y_batch
                })
                print('step %d, training accuracy %g, %g msec to evaluate' %
                      (i, train_accuracy, 1000 * (time.time() - t)))
            t = time.time()
            _, loss = sess.run([train_step, cross_entropy],
                               feed_dict={
                                   x: x_batch,
                                   y_: y_batch
                               })
            if i % 1000 == 999 or i == FLAGS.train_loop_count - 1:
                test_accuracy = accuracy.eval(feed_dict={
                    x: x_test,
                    y_: y_test
                })
                print('test accuracy %g' % test_accuracy)

        print("Training finished. Saving model.")

        save_model(sess, './model', 'model')
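save_model() is a helper that is not shown here (and its signature differs between examples). Because the later MLP examples load './model/model.pb' as a frozen GraphDef and look up "import/output:0", a plausible sketch matching the call save_model(sess, './model', 'model') above, under that assumption, is:

import os
import tensorflow as tf

def save_model(sess, directory, filename, output_nodes=("output",)):
    # Assumed sketch: freeze the session's variables into constants and
    # serialize the resulting GraphDef to <directory>/<filename>.pb.
    frozen = tf.compat.v1.graph_util.convert_variables_to_constants(
        sess, sess.graph_def, list(output_nodes))
    os.makedirs(directory, exist_ok=True)
    path = os.path.join(directory, filename + ".pb")
    with tf.io.gfile.GFile(path, "wb") as f:
        f.write(frozen.SerializeToString())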
Example #9
def test_mnist_cnn(FLAGS):
    (x_train, y_train, x_test, y_test) = load_mnist_data()

    batch_size = FLAGS.batch_size
    x_test_batch = x_test[:batch_size]
    y_test_batch = y_test[:FLAGS.batch_size]

    data = x_test_batch.swapaxes(1, 2).flatten('F')
    print('Client batch size from FLAG: ', batch_size)

    complex_scale_factor = 1
    if ('NGRAPH_COMPLEX_PACK' in os.environ):
        complex_scale_factor = 2

    print('complex_scale_factor', complex_scale_factor)

    # TODO: support batch sizes that are not a multiple of complex_scale_factor
    assert batch_size % complex_scale_factor == 0

    hostname = 'localhost'
    port = 34000

    new_batch_size = batch_size // complex_scale_factor
    print('new_batch_size', new_batch_size)

    client = he_seal_client.HESealClient(hostname, port, new_batch_size, data)

    print('Sleeping until client is done')
    while not client.is_done():
        time.sleep(1)

    results = client.get_results()
    results = np.round(results, 2)

    y_pred_reshape = np.array(results).reshape(10, batch_size)
    with np.printoptions(precision=3, suppress=True):
        print(y_pred_reshape.T)

    y_pred = y_pred_reshape.argmax(axis=0)
    print('y_pred', y_pred)
    y_true = y_test_batch.argmax(axis=1)

    correct = np.sum(np.equal(y_pred, y_true))
    acc = correct / float(batch_size)
    print('pred size', len(y_pred))
    print('correct', correct)
    print('Accuracy (batch size', batch_size, ') =', acc * 100., '%')
Example #10
def train_model():
    (x_train, y_train, x_test, y_test) = load_mnist_data()

    x = Input(
        shape=(
            28,
            28,
            1,
        ), name="input")
    y = mnist_mlp_model(x)

    mlp_model = Model(inputs=x, outputs=y)
    print(mlp_model.summary())

    def loss(labels, logits):
        return categorical_crossentropy(labels, logits, from_logits=True)

    optimizer = Adam()
    mlp_model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])

    t0 = time.perf_counter()
    mlp_model.fit(
        x_train,
        y_train,
        epochs=10,
        batch_size=128,
        validation_data=(x_test, y_test),
        verbose=1)

    t1 = time.perf_counter()
    cur_times['t_training'] = delta_ms(t0, t1)

    test_loss, test_acc = mlp_model.evaluate(x_test, y_test, verbose=1)
    print("\nTest accuracy:", test_acc)

    save_model(
        tf.compat.v1.keras.backend.get_session(),
        ["output/BiasAdd"],
        "./models",
        "mlp",
    )

    # If we want to have training and testing in the same run, we must clean up after training
    tf.keras.backend.clear_session()
    tf.compat.v1.reset_default_graph()
Example #11
def get_data(name, party_id, n_parties, FLAGS):
    if name == 'mnist':
        (x_train, y_train, x_test,
         y_test) = load_mnist_data(FLAGS.start_batch, FLAGS.batch_size)
        # Trim the datasets so they divide evenly among the parties.
        leftover = len(x_train) % (n_parties + 1)
        if leftover > 0:
            x_train, y_train = x_train[:-leftover], y_train[:-leftover]
        leftover = len(x_test) % (n_parties + 1)
        if leftover > 0:
            x_test, y_test = x_test[:-leftover], y_test[:-leftover]
        train_indices = np.arange(len(x_train))
        # TODO: party_t* variables below are not used.
        party_train_indices = np.split(train_indices, n_parties)[party_id]
        test_indices = np.arange(len(x_test))
        party_test_indices = np.split(test_indices, n_parties)[party_id]
        return (x_train[train_indices],
                y_train[train_indices]), (x_test[test_indices],
                                          y_test[test_indices])
    else:
        raise ValueError(f"Invalid dataset name: {name}.")
Example #12
def test_network():
    batch_size = 4000
    # Load MNIST data (for test set)
    (x_train, y_train, x_test, y_test) = load_mnist_data(start_batch=0,
                                                         batch_size=batch_size)

    # Load saved model
    tf.import_graph_def(load_pb_file("models/cryptonets.pb"))

    print("loaded model")
    print_nodes()

    # Get input / output tensors
    x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
        "import/input:0")
    y_output = tf.compat.v1.get_default_graph().get_tensor_by_name(
        "import/output/BiasAdd:0")

    # Create configuration to encrypt input
    config = server_config(x_input.name)
    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        start_time = time.time()
        t0 = time.perf_counter()
        y_hat = y_output.eval(feed_dict={x_input: x_test})
        t1 = time.perf_counter()
        cur_times['t_computation'] = delta_ms(t0, t1)
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))

    y_test_label = np.argmax(y_test, 1)

    if batch_size < 60:
        print("y_hat", np.round(y_hat, 2))

    y_pred = np.argmax(y_hat, 1)
    correct_prediction = np.equal(y_pred, y_test_label)
    error_count = np.size(correct_prediction) - np.sum(correct_prediction)
    test_accuracy = np.mean(correct_prediction)

    print("Error count", error_count, "of", batch_size, "elements.")
    print("Accuracy: %g " % test_accuracy)
    cur_times['test_accuracy'] = test_accuracy
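load_pb_file() and print_nodes() are helpers that are not shown in these examples. Assuming the model is stored as a frozen GraphDef (which is how tf.import_graph_def() consumes it above), a plausible sketch of load_pb_file is:

import tensorflow as tf

def load_pb_file(filename):
    # Assumed sketch: read a serialized, frozen GraphDef from disk so it can
    # be handed to tf.import_graph_def(), as the examples above do.
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(filename, "rb") as f:
        graph_def.ParseFromString(f.read())
    return graph_def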
Example #13
def test_mnist_cnn(FLAGS):
    (x_train, y_train, x_test, y_test) = load_mnist_data()

    batch_size = FLAGS.batch_size
    x_test_batch = x_test[:batch_size]
    y_test_batch = y_test[:FLAGS.batch_size]

    data = x_test_batch.flatten('C')
    print('Client batch size from FLAG:', batch_size)

    complex_packing = False
    if ('NGRAPH_COMPLEX_PACK' in os.environ):
        complex_packing = str2bool(os.environ['NGRAPH_COMPLEX_PACK'])

    hostname = 'localhost'
    port = 34000

    print('complex_packing?', complex_packing)

    client = pyhe_client.HESealClient(hostname, port, batch_size, data,
                                      complex_packing)

    print('Sleeping until client is done')
    while not client.is_done():
        time.sleep(1)

    results = client.get_results()
    results = np.round(results, 2)

    y_pred_reshape = np.array(results).reshape(batch_size, 10)
    with np.printoptions(precision=3, suppress=True):
        print(y_pred_reshape)

    y_pred = y_pred_reshape.argmax(axis=1)
    print('y_pred', y_pred)
    y_true = y_test_batch.argmax(axis=1)

    correct = np.sum(np.equal(y_pred, y_true))
    acc = correct / float(batch_size)
    print('pred size', len(y_pred))
    print('correct', correct)
    print('Accuracy (batch size', batch_size, ') =', acc * 100., '%')
Example #14
def test_mnist_mlp(FLAGS):
    (x_train, y_train, x_test, y_test) = load_mnist_data()
    x_test = x_test[:FLAGS.batch_size]
    y_test = y_test[:FLAGS.batch_size]

    graph_def = load_pb_file('./model/model.pb')

    with tf.Graph().as_default():
        tf.import_graph_def(graph_def)
        y_conv = tf.compat.v1.get_default_graph().get_tensor_by_name(
            "import/output:0")
        x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
            "import/input:0")

        config = server_config_from_flags(FLAGS, x_input.name)

        print('config', config)

        with tf.compat.v1.Session(config=config) as sess:
            start_time = time.time()
            y_conv_val = y_conv.eval(
                session=sess, feed_dict={
                    x_input: x_test,
                })
            elapsed_time = time.time() - start_time
            print("total time(s)", np.round(elapsed_time, 3))

    if not FLAGS.enable_client:
        y_test_batch = y_test[:FLAGS.batch_size]
        y_label_batch = np.argmax(y_test_batch, 1)

        y_pred = np.argmax(y_conv_val, 1)
        print('y_pred', y_pred)
        correct_prediction = np.equal(y_pred, y_label_batch)
        error_count = np.size(correct_prediction) - np.sum(correct_prediction)
        test_accuracy = np.mean(correct_prediction)

        print('Error count', error_count, 'of', FLAGS.batch_size, 'elements.')
        print('Accuracy: %g ' % test_accuracy)
Example #15
def test_network(FLAGS):
    (x_train, y_train, x_test,
     y_test) = load_mnist_data(FLAGS.start_batch, FLAGS.batch_size)

    # Load saved model
    tf.import_graph_def(load_pb_file(FLAGS.model_file))

    print("loaded model")
    print_nodes()

    # Get input / output tensors
    x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
        FLAGS.input_node)
    y_output = tf.compat.v1.get_default_graph().get_tensor_by_name(
        FLAGS.output_node)

    # Create configuration to encrypt input
    FLAGS, unparsed = server_argument_parser().parse_known_args()
    config = server_config_from_flags(FLAGS, x_input.name)
    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        start_time = time.time()
        y_hat = y_output.eval(feed_dict={x_input: x_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))

    if not FLAGS.enable_client:
        y_test_label = np.argmax(y_test, 1)

        if FLAGS.batch_size < 60:
            print("y_hat", np.round(y_hat, 2))

        y_pred = np.argmax(y_hat, 1)
        correct_prediction = np.equal(y_pred, y_test_label)
        error_count = np.size(correct_prediction) - np.sum(correct_prediction)
        test_accuracy = np.mean(correct_prediction)

        print("Error count", error_count, "of", FLAGS.batch_size, "elements.")
        print("Accuracy: %g " % test_accuracy)
Example #16
def test_mnist_mlp(FLAGS):
    (x_train, y_train, x_test, y_test) = load_mnist_data()
    x_test = x_test[:FLAGS.batch_size]
    y_test = y_test[:FLAGS.batch_size]

    graph_def = load_pb_file('./model/model.pb')

    with tf.Graph().as_default():
        tf.import_graph_def(graph_def)
        y_conv = tf.compat.v1.get_default_graph().get_tensor_by_name(
            "import/output:0")
        x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
            "import/Placeholder:0")
        sess = tf.compat.v1.Session()

        start_time = time.time()
        y_conv_val = y_conv.eval(
            session=sess, feed_dict={
                x_input: x_test,
            })
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))

    using_client = (os.environ.get('NGRAPH_ENABLE_CLIENT') is not None)

    if not using_client:
        y_test_batch = y_test[:FLAGS.batch_size]
        y_label_batch = np.argmax(y_test_batch, 1)

        y_pred = np.argmax(y_conv_val, 1)
        print('y_pred', y_pred)
        correct_prediction = np.equal(y_pred, y_label_batch)
        error_count = np.size(correct_prediction) - np.sum(correct_prediction)
        test_accuracy = np.mean(correct_prediction)

        print('Error count', error_count, 'of', FLAGS.batch_size, 'elements.')
        print('Accuracy: %g ' % test_accuracy)
Example #17
def main(FLAGS):
    (x_train, y_train, x_test, y_test) = mnist_util.load_mnist_data()

    x = Input(
        shape=(
            28,
            28,
            1,
        ), name="input")
    y = model.mnist_mlp_model(x)

    mlp_model = Model(inputs=x, outputs=y)
    print(mlp_model.summary())

    def loss(labels, logits):
        return categorical_crossentropy(labels, logits, from_logits=True)

    optimizer = SGD(learning_rate=0.008, momentum=0.9)
    mlp_model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])

    mlp_model.fit(
        x_train,
        y_train,
        epochs=FLAGS.epochs,
        batch_size=FLAGS.batch_size,
        validation_data=(x_test, y_test),
        verbose=1)

    test_loss, test_acc = mlp_model.evaluate(x_test, y_test, verbose=1)
    print("\nTest accuracy:", test_acc)

    mnist_util.save_model(
        tf.compat.v1.keras.backend.get_session(),
        ["output/BiasAdd"],
        "./models",
        "mlp",
    )
Example #18
def train_model():
    (x_train, y_train, x_test, y_test) = load_mnist_data()

    x = Input(shape=(
        28,
        28,
        1,
    ), name="input")

    y = cryptonets_model(x)
    cryptonets_model_var = Model(inputs=x, outputs=y)
    print(cryptonets_model_var.summary())

    def loss(labels, logits):
        return categorical_crossentropy(labels, logits, from_logits=True)

    optimizer = Adam()
    cryptonets_model_var.compile(optimizer=optimizer,
                                 loss=loss,
                                 metrics=["accuracy"])

    t0 = time.perf_counter()
    cryptonets_model_var.fit(x_train,
                             y_train,
                             epochs=10,
                             batch_size=128,
                             validation_data=(x_test, y_test),
                             verbose=1)
    t1 = time.perf_counter()
    cur_times['t_training'] = delta_ms(t0, t1)

    test_loss, test_acc = cryptonets_model_var.evaluate(x_test,
                                                        y_test,
                                                        verbose=1)
    print("Test accuracy:", test_acc)

    # Squash weights and save model
    weights = squash_layers(cryptonets_model_var,
                            tf.compat.v1.keras.backend.get_session())
    (conv1_weights, squashed_weights, fc1_weights, fc2_weights, act1_weights,
     act2_weights) = weights[0:6]

    tf.compat.v1.reset_default_graph()
    sess = tf.compat.v1.Session()

    x = Input(shape=(
        28,
        28,
        1,
    ), name="input")
    y = cryptonets_model_squashed(x, conv1_weights, squashed_weights,
                                  fc2_weights, act1_weights, act2_weights)
    sess.run(tf.compat.v1.global_variables_initializer())
    save_model(
        sess,
        ["output/BiasAdd"],
        "./models",
        "cryptonets",
    )

    # If we want to have training and testing in the same run, we must clean up after training
    tf.keras.backend.clear_session()
    tf.compat.v1.reset_default_graph()
Example #19
def test_mnist_cnn(FLAGS):
    (x_train, y_train, x_test, y_test) = load_mnist_data()

    x = tf.compat.v1.placeholder(tf.float32, [None, 28, 28, 1], name='input')
    y_ = tf.compat.v1.placeholder(tf.float32, [None, 10])

    # Create the model
    # Using the full dataset
    y_conv = cryptonets_test_squashed(x)

    config = server_config_from_flags(FLAGS, x.name)

    print('config', config)

    with tf.compat.v1.Session(config=config) as sess:
        x_test = x_test[:FLAGS.batch_size]
        y_test = y_test[:FLAGS.batch_size]
        start_time = time.time()
        y_conv_val = y_conv.eval(feed_dict={x: x_test, y_: y_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))
        print('y_conv_val', np.round(y_conv_val, 2))

    y_test_batch = y_test[:FLAGS.batch_size]
    y_label_batch = np.argmax(y_test_batch, 1)

    correct_prediction = np.equal(np.argmax(y_conv_val, 1), y_label_batch)
    error_count = np.size(correct_prediction) - np.sum(correct_prediction)
    test_accuracy = np.mean(correct_prediction)
    print('Using full dataset')
    print('Error count:', error_count, 'of', FLAGS.batch_size, 'elements.')
    print('Accuracy: ', test_accuracy)

    # Using HE averaging
    y_conv = cryptonets_HE_avg(x)
    config = server_config_from_flags(FLAGS, x.name)

    print('config', config)

    with tf.compat.v1.Session(config=config) as sess:
        x_test = x_test[:FLAGS.batch_size]
        y_test = y_test[:FLAGS.batch_size]
        start_time = time.time()
        y_conv_val = y_conv.eval(feed_dict={x: x_test, y_: y_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))
        print('y_conv_val', np.round(y_conv_val, 2))

    y_test_batch = y_test[:FLAGS.batch_size]
    y_label_batch = np.argmax(y_test_batch, 1)

    correct_prediction = np.equal(np.argmax(y_conv_val, 1), y_label_batch)
    error_count = np.size(correct_prediction) - np.sum(correct_prediction)
    test_accuracy = np.mean(correct_prediction)
    print('Using HE_averaging')
    print('Error count:', error_count, 'of', FLAGS.batch_size, 'elements.')
    print('Accuracy: ', test_accuracy)
    
    # Using repeated sampling, mode 3
    y_conv = cryptonets_test_squashed_mode(x,3)
    config = server_config_from_flags(FLAGS, x.name)

    print('config', config)

    with tf.compat.v1.Session(config=config) as sess:
        x_test = x_test[:FLAGS.batch_size]
        y_test = y_test[:FLAGS.batch_size]
        start_time = time.time()
        y_conv_val = y_conv.eval(feed_dict={x: x_test, y_: y_test})
        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))
        print('y_conv_val', np.round(y_conv_val, 2))

    y_test_batch = y_test[:FLAGS.batch_size]
    y_label_batch = np.argmax(y_test_batch, 1)

    correct_prediction = np.equal(np.argmax(y_conv_val, 1), y_label_batch)
    error_count = np.size(correct_prediction) - np.sum(correct_prediction)
    test_accuracy = np.mean(correct_prediction)
    print('Using average of 4 partitions (repeated sampling)')
    print('Error count:', error_count, 'of', FLAGS.batch_size, 'elements.')
    print('Accuracy: ', test_accuracy)
Example #20
        with open("output.txt", 'w') as outfile:
            for v in array_sum.flatten():
                outfile.write(f'{v}\n')
        csp_filenames = [f'noise{port}privacy.txt' for port in FLAGS.ports]
        label = csp.get_histogram(
            client_filename='output.txt',
            csp_filenames=csp_filenames,
            csp_sum_filename='final.txt')
        print(label)


if __name__ == "__main__":
    FLAGS, unparsed = client_argument_parser().parse_known_args()
    if unparsed:
        print("Unparsed flags:", unparsed)
        exit(1)

    (x_train, y_train, x_test, y_test) = load_mnist_data(
        FLAGS.start_batch, FLAGS.batch_size)

    is_test = False
    if is_test:
        data = mnist_x_test
        y_test = [mnist_y_test]
    else:
        data = x_test.flatten("C")
        # print('data (x_test): ', data)
        # print('y_test: ', y_test)

    run_client(FLAGS=FLAGS, data=data, labels=y_test)
Example #21
def run_server(FLAGS):
    (x_train, y_train, x_test,
     y_test) = load_mnist_data(FLAGS.start_batch, FLAGS.batch_size)

    # Load saved model
    tf.import_graph_def(load_pb_file(FLAGS.model_file))

    print("loaded model")
    print_nodes()

    # Get input / output tensors
    x_input = tf.compat.v1.get_default_graph().get_tensor_by_name(
        FLAGS.input_node)
    y_output = tf.compat.v1.get_default_graph().get_tensor_by_name(
        FLAGS.output_node)
    # y_max = tf.nn.softmax(y_output)
    # y_max = y_output
    # y_max = approximate_softmax(x=y_output, nr_terms=2)
    # y_max = max_pool(y_output=y_output, FLAGS=FLAGS)
    print('r_star: ', FLAGS.rstar)
    print('is r_star [-1.0]', FLAGS.rstar == [-1.0])
    r_star = None
    if FLAGS.rstar is None:
        # for debugging: we don't want any r*
        y_max = y_output
    else:
        if FLAGS.rstar == [-1.0]:
            # Generate random r_star
            if y_test is not None:
                r_shape = y_test.shape
                batch_size = r_shape[0]
                num_classes = r_shape[1]
            else:
                batch_size, num_classes = FLAGS.batch_size, FLAGS.num_classes
            r_star = get_rstar_server(
                max_logit=FLAGS.max_logit,
                batch_size=batch_size,
                num_classes=num_classes,
                exp=FLAGS.rstar_exp,
            )
        else:
            r_star = np.array(FLAGS.rstar)
        # r - r* (subtract the random vector r* from logits)
        y_max = tf.subtract(y_output,
                            tf.convert_to_tensor(r_star, dtype=tf.float32))

        if FLAGS.debug is True:
            print('y_max shape: ', y_max.shape)
            print('r_star shape: ', r_star.shape)
            y_max = tf.concat([y_max, r_star], axis=0)

    # Create configuration to encrypt input
    config = server_config_from_flags(FLAGS, x_input.name)
    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        start_time = time.time()
        y_hat = sess.run(y_max, feed_dict={x_input: x_test})
        # y_hat = y_max.eval(feed_dict={x_input: x_test})
        print('y_hat: ', y_hat)
        if FLAGS.debug is True:
            r_star = y_hat[FLAGS.batch_size:]
            y_hat = y_hat[:FLAGS.batch_size]
        print("logits (y_hat): ", array_str(y_hat))
        print("logits (y_hat) type: ", type(y_hat))
        print("logits (y_hat) shape: ", y_hat.shape)
        # print("change y_hat to one_hot encoding")
        y_pred = y_hat.argmax(axis=1)
        print("y_pred: ", y_pred)

        elapsed_time = time.time() - start_time
        print("total time(s)", np.round(elapsed_time, 3))
        party_id = int(FLAGS.port)

        if r_star is not None:
            print("doing 2pc")
            print('r_star (r*): ', array_str(r_star))
            if FLAGS.round_exp is not None:
                # r_star = (r_star * 2 ** FLAGS.round_exp).astype(np.int64)
                r_star = round_array(x=r_star, exp=FLAGS.round_exp)
                print('rounded r_star (r*): ', array_str(r_star))
            with open(f'{out_server_name}{party_id}.txt',
                      'w') as outfile:  # party id
                # Assume a batch size of 1 for now. TODO: support batch sizes > 1.
                for val in r_star.flatten():
                    outfile.write(f"{int(val)}" + '\n')
            time.sleep(1)
            process = subprocess.Popen([
                './gc-emp-test/bin/argmax_1', '1', '12345',
                f'{out_server_name}{party_id}.txt',
                f'{out_final_name}{party_id}.txt'
            ])
            # time.sleep(15)
            process.wait()
        else:
            print('r_star is None in he_server.py!')

    if not FLAGS.enable_client:
        y_test_label = np.argmax(y_test, 1)

        if FLAGS.batch_size < 60:
            print("y_hat", np.round(y_hat, 2))

        y_pred = np.argmax(y_hat, 1)
        correct_prediction = np.equal(y_pred, y_test_label)
        error_count = np.size(correct_prediction) - np.sum(correct_prediction)
        test_accuracy = np.mean(correct_prediction)

        print("Error count", error_count, "of", FLAGS.batch_size, "elements.")
        print("Accuracy: %g " % test_accuracy)