Example #1
    print("y_A shape", y_A.shape)
    print("X_B shape", X_B.shape)
    print("y_B shape", y_B.shape)

    print("overlap_indexes len", len(overlap_indexes))
    print("non_overlap_indexes len", len(non_overlap_indexes))
    print("validate_indexes len", len(validate_indexes))
    print("test_indexes len", len(test_indexes))

    print(
        "################################ Build Federated Models ############################"
    )

    # Start from a clean TF1 graph before building the two local models.
    tf.reset_default_graph()

    # One local autoencoder per party: A plays the guest role, B the host role.
    autoencoder_A = Autoencoder(1)
    autoencoder_B = Autoencoder(2)

    autoencoder_A.build(X_A.shape[-1], 200, learning_rate=0.01)
    autoencoder_B.build(X_B.shape[-1], 200, learning_rate=0.01)

    # alpha = 100
    fake_model_param = FakeFTLModelParam()
    partyA = PlainFTLGuestModel(autoencoder_A, fake_model_param)
    partyB = PlainFTLHostModel(autoencoder_B, fake_model_param)

    # Wire both parties into a local, in-process plain FTL runner.
    federatedLearning = LocalPlainFederatedTransferLearning(partyA, partyB)

    print(
        "################################ Train Federated Models ############################"
    )
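
The examples in this listing rely only on a small Autoencoder surface: build, set_session, fit, predict, get_model_parameters and restore_model, plus the Wh/Wo/bh/bo variables read by the restore test further down. The sketch below is a minimal TensorFlow 1.x stand-in reconstructed from that usage alone; it is an assumption for illustration, not the project's actual implementation (the weight initialization, batch size and optimizer are all choices made here).

import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x, matching the tf.Session usage above


class Autoencoder(object):

    def __init__(self, an_id):
        self.id = str(an_id)
        self.learning_rate = 0.01

    def build(self, input_dim, hidden_dim, learning_rate=0.01):
        self.learning_rate = learning_rate
        self._build_graph(input_dim, hidden_dim)

    def restore_model(self, model_parameters):
        # Rebuild the graph with the stored matrices as initial values, so that
        # re-running tf.global_variables_initializer() reproduces the trained weights.
        input_dim, hidden_dim = model_parameters["Wh"].shape
        self._build_graph(input_dim, hidden_dim, init=model_parameters)

    def _build_graph(self, input_dim, hidden_dim, init=None):
        def initial(name, shape):
            if init is not None and name in init:
                return init[name]
            return (np.random.randn(*shape) / np.sqrt(shape[0])).astype(np.float32)

        with tf.variable_scope("autoencoder_" + self.id):
            self.X_in = tf.placeholder(tf.float32, shape=(None, input_dim))
            self.Wh = tf.Variable(initial("Wh", (input_dim, hidden_dim)))
            self.bh = tf.Variable(initial("bh", (hidden_dim,)))
            self.Wo = tf.Variable(initial("Wo", (hidden_dim, input_dim)))
            self.bo = tf.Variable(initial("bo", (input_dim,)))
            self.Z = tf.nn.sigmoid(tf.matmul(self.X_in, self.Wh) + self.bh)  # encoding
            self.X_hat = tf.matmul(self.Z, self.Wo) + self.bo                # reconstruction
            self.loss = tf.reduce_mean(tf.square(self.X_in - self.X_hat))
            self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)

    def set_session(self, session):
        self.session = session

    def fit(self, X, epoch=1, batch_size=32, show_fig=False):
        # show_fig is accepted for API compatibility but ignored in this sketch.
        for _ in range(epoch):
            for i in range(0, len(X), batch_size):
                self.session.run(self.train_op, feed_dict={self.X_in: X[i:i + batch_size]})

    def predict(self, X):
        return self.session.run(self.X_hat, feed_dict={self.X_in: X})

    def get_model_parameters(self):
        return {"Wh": self.session.run(self.Wh), "Wo": self.session.run(self.Wo),
                "bh": self.session.run(self.bh), "bo": self.session.run(self.bo)}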
Example #2
    def _create_local_model(self, local_model_param):
        # Build the host party's local autoencoder from the supplied parameters.
        autoencoder = Autoencoder("local_host_model_01")
        autoencoder.build(input_dim=local_model_param.input_dim,
                          hidden_dim=local_model_param.encode_dim,
                          learning_rate=local_model_param.learning_rate)
        return autoencoder
Example #3
    def _create_local_model(self, ftl_local_model_param, ftl_data_param):
        # Same factory as above, but the input width comes from the host's feature count.
        autoencoder = Autoencoder("local_ftl_host_model_01")
        autoencoder.build(input_dim=ftl_data_param.n_feature_host,
                          hidden_dim=ftl_local_model_param.encode_dim,
                          learning_rate=ftl_local_model_param.learning_rate)
        return autoencoder
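
The parameter objects passed into these two factories only need the attributes the code reads: encode_dim and learning_rate on the local-model parameter, and input_dim or n_feature_host for the input width. For a quick local check, plain namespaces with made-up values are enough; the numbers below are illustrative only.

from types import SimpleNamespace

# Hypothetical values; only the attribute names matter to _create_local_model.
ftl_local_model_param = SimpleNamespace(encode_dim=32, learning_rate=0.01)
ftl_data_param = SimpleNamespace(n_feature_host=64)

# host_model._create_local_model(ftl_local_model_param, ftl_data_param)
# would then build a 64 -> 32 autoencoder for the host party.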
Example #4
    def test_autoencoder_restore_model(self):

        X = np.array([[4, 2, 3], [6, 5, 1], [3, 4, 1], [1, 2, 3]])

        _, D = X.shape

        # Build and briefly train an autoencoder, then capture its parameters.
        tf.reset_default_graph()
        autoencoder = Autoencoder(0)
        autoencoder.build(D, 5)
        init_op = tf.global_variables_initializer()
        with tf.Session() as session:
            autoencoder.set_session(session)
            session.run(init_op)
            autoencoder.fit(X, epoch=10)
            model_parameters = autoencoder.get_model_parameters()

        # Rebuild the graph from the captured parameters; after re-running the
        # initializer, the restored variables should match the stored values.
        tf.reset_default_graph()

        autoencoder.restore_model(model_parameters)
        init_op = tf.global_variables_initializer()
        with tf.Session() as session:
            autoencoder.set_session(session)
            session.run(init_op)
            Wh = autoencoder.Wh.eval()
            Wo = autoencoder.Wo.eval()
            bh = autoencoder.bh.eval()
            bo = autoencoder.bo.eval()

        assert_matrix(model_parameters["Wh"], Wh)
        assert_matrix(model_parameters["Wo"], Wo)
        assert_matrix(model_parameters["bh"], bh)
        assert_matrix(model_parameters["bo"], bo)
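
The assert_matrix helper used by this test is not shown in the listing. A minimal numpy-based stand-in that satisfies the comparisons above could look like this; the project's own helper may differ.

import numpy as np

def assert_matrix(expected, actual, rtol=1e-5, atol=1e-8):
    # Element-wise equality check with a small tolerance for float round-off.
    np.testing.assert_allclose(np.asarray(expected), np.asarray(actual),
                               rtol=rtol, atol=atol)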
Example #5
def test_single_autoencoder():

    # To run this test, first download the MNIST dataset from Kaggle:
    # https://www.kaggle.com/ngbolin/mnist-dataset-digit-recognizer
    file_path = '../../../../data/MINST/train.csv'
    Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST(file_path)
    Xtrain = Xtrain.astype(np.float32)
    Xtest = Xtest.astype(np.float32)

    _, D = Xtrain.shape

    autoencoder = Autoencoder(0)
    autoencoder.build(D, 200)
    init_op = tf.global_variables_initializer()
    with tf.Session() as session:
        autoencoder.set_session(session)
        session.run(init_op)
        autoencoder.fit(Xtrain, epoch=1, show_fig=True)

        i = np.random.choice(len(Xtest))
        x = Xtest[i]
        y = autoencoder.predict([x])

        plt.subplot(1, 2, 1)
        plt.imshow(x.reshape(28, 28), cmap='gray')
        plt.title('Original')

        plt.subplot(1, 2, 2)
        plt.imshow(y.reshape(28, 28), cmap='gray')
        plt.title('Reconstructed')
        plt.show()

        model_parameters = autoencoder.get_model_parameters()

    # Test whether the autoencoder can be restored from the stored model parameters.
    tf.reset_default_graph()

    autoencoder_2 = Autoencoder(0)
    autoencoder_2.restore_model(model_parameters)
    init_op = tf.global_variables_initializer()
    with tf.Session() as session:
        autoencoder_2.set_session(session)
        session.run(init_op)

        y_hat = autoencoder_2.predict([x])

        plt.subplot(1, 2, 1)
        plt.imshow(y.reshape(28, 28), cmap='gray')
        plt.title('Original model')

        plt.subplot(1, 2, 2)
        plt.imshow(y_hat.reshape(28, 28), cmap='gray')
        plt.title('Restored model')
        plt.show()

        assert_matrix(y, y_hat)
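
getKaggleMNIST is likewise not part of this listing. A plausible loader for the Kaggle train.csv referenced above might look like the sketch below (label in the first column, 784 pixel columns scaled to [0, 1], a fixed tail held out for testing); the split size and scaling are assumptions, not taken from the project.

import numpy as np
import pandas as pd

def getKaggleMNIST(file_path, n_test=1000):
    # Kaggle's train.csv layout: column 0 is the digit label, columns 1..784 are pixels.
    data = pd.read_csv(file_path).values.astype(np.float32)
    np.random.shuffle(data)
    X = data[:, 1:] / 255.0           # scale pixel intensities to [0, 1]
    Y = data[:, 0].astype(np.int32)
    Xtrain, Ytrain = X[:-n_test], Y[:-n_test]
    Xtest, Ytest = X[-n_test:], Y[-n_test:]
    return Xtrain, Ytrain, Xtest, Ytest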