Example #1
def invert2(settings,
            samples,
            para_path,
            g_tolerance=None,
            e_tolerance=0.1,
            n_iter=None,
            max_iter=10000,
            heuristic_sigma=None):
    """
    Return the latent-space points corresponding to a set of samples (found via gradient descent).
    Note: this function is designed to invert ONE sample at a time.
    """
    # num_samples = samples.shape[0]
    # cast samples to float32

    samples = np.float32(samples)

    # get the model
    # if settings is a string, assume it's an identifier and load
    if isinstance(settings, str):
        settings = json.load(
            open('./experiments/settings/' + settings + '.txt', 'r'))

    # print('Inverting', 1, 'samples using model', settings['identifier'], 'at epoch', epoch,)
    # if not g_tolerance is None:
    #     print('until gradient norm is below', g_tolerance)
    # else:
    #     print('until error is below', e_tolerance)

    # get parameters
    parameters = model.load_parameters(para_path)
    # # assertions
    # assert samples.shape[2] == settings['num_generated_features']
    # create VARIABLE Z
    Z = tf.compat.v1.get_variable(
        name='Z',
        shape=[1, settings['seq_length'], settings['latent_dim']],
        initializer=tf.compat.v1.random_normal_initializer())
    # create outputs

    G_samples = generator_o(Z,
                            settings['hidden_units_g'],
                            settings['seq_length'],
                            1,
                            settings['num_generated_features'],
                            reuse=False,
                            parameters=parameters)
    # generator_vars = ['hidden_units_g', 'seq_length', 'batch_size', 'num_generated_features', 'cond_dim', 'learn_scale']
    # generator_settings = dict((k, settings[k]) for k in generator_vars)
    # G_samples = model.generator(Z, **generator_settings, reuse=True)

    fd = None

    # define the MMD-based loss
    if heuristic_sigma is None:
        # heuristic_sigma = mmd.median_pairwise_distance_o(samples)  # this is noisy
        # heuristic_sigma = mmd.median_pairwise_distance_o(samples)  + np.(0.0000001).astype(dtype=float32)
        heuristic_sigma = mmd.median_pairwise_distance_o(samples) + np.array(
            0.0000001, dtype=np.float32)
        print('heuristic_sigma:', heuristic_sigma)
    samples = tf.compat.v1.reshape(
        samples,
        [1, settings['seq_length'], settings['num_generated_features']])
    # Kxx, Kxy, Kyy, wts = mmd._mix_rbf_kernel(G_samples, samples, sigmas=tf.constant(value=heuristic_sigma, shape=(1, 1)))

    # base = 1.0
    # sigma_list = [1, 2, 4, 8, 16]
    # sigma_list = [sigma / base for sigma in sigma_list]
    # Kxx, Kxy, Kyy, len_sigma_list = mmd._mix_rbf_kernel2(G_samples, samples, sigma_list)

    # ---------------------
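    # The block below inlines the mixed-RBF kernel from the commented
    # mmd._mix_rbf_kernel call above: it builds kernel matrices between the
    # generated samples (X) and the target samples (Y) using heuristic_sigma.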
    X = G_samples
    Y = samples
    sigmas = tf.constant(value=heuristic_sigma, shape=(1, 1))
    wts = [1.0] * sigmas.get_shape()[0]
    if len(X.shape) == 2:
        # matrix
        XX = tf.compat.v1.matmul(X, X, transpose_b=True)
        XY = tf.compat.v1.matmul(X, Y, transpose_b=True)
        YY = tf.compat.v1.matmul(Y, Y, transpose_b=True)
    elif len(X.shape) == 3:
        # rank-3 tensors -- tensordot over the last two axes gives pairwise Frobenius inner products
        XX = tf.compat.v1.tensordot(X, X, axes=[[1, 2], [1, 2]])
        XY = tf.compat.v1.tensordot(X, Y, axes=[[1, 2], [1, 2]])
        YY = tf.compat.v1.tensordot(Y, Y, axes=[[1, 2], [1, 2]])
    else:
        raise ValueError('X must be a rank-2 or rank-3 tensor, got shape {}'.format(X.shape))
    X_sqnorms = tf.compat.v1.diag_part(XX)
    Y_sqnorms = tf.compat.v1.diag_part(YY)
    r = lambda x: tf.compat.v1.expand_dims(x, 0)
    c = lambda x: tf.compat.v1.expand_dims(x, 1)
    K_XX, K_XY, K_YY = 0, 0, 0
    for sigma, wt in zip(tf.compat.v1.unstack(sigmas, axis=0), wts):
        gamma = 1 / (2 * sigma**2)
        K_XX += wt * tf.compat.v1.exp(-gamma *
                                      (-2 * XX + c(X_sqnorms) + r(X_sqnorms)))
        K_XY += wt * tf.compat.v1.exp(-gamma *
                                      (-2 * XY + c(X_sqnorms) + r(Y_sqnorms)))
        K_YY += wt * tf.compat.v1.exp(-gamma *
                                      (-2 * YY + c(Y_sqnorms) + r(Y_sqnorms)))
    Kxx = K_XX
    Kxy = K_XY
    Kyy = K_YY
    wts = tf.compat.v1.reduce_sum(wts)
    # ---------------------

    similarity_per_sample = tf.compat.v1.diag_part(Kxy)
    reconstruction_error_per_sample = 1 - similarity_per_sample
    # reconstruction_error_per_sample = tf.reduce_sum((tf.nn.l2_normalize(G_samples, dim=1) - tf.nn.l2_normalize(samples, dim=1))**2, axis=[1,2])
    reconstruction_error = 1 - tf.compat.v1.reduce_mean(similarity_per_sample)

    # updater
    # solver = tf.compat.v1.train.AdamOptimizer().minimize(reconstruction_error_per_sample, var_list=[Z])
    # solver = tf.train.RMSPropOptimizer(learning_rate=500).minimize(reconstruction_error, var_list=[Z])
    # NOTE: the learning rate must be positive, otherwise Z is never updated
    # (cf. Example #2, which uses 0.1)
    solver = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.1).minimize(
        reconstruction_error_per_sample, var_list=[Z])
    # solver = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9).minimize(reconstruction_error_per_sample, var_list=[Z])

    # grad_Z = tf.compat.v1.gradients(reconstruction_error_per_sample, Z)[0]
    grad_Z = tf.compat.v1.gradients(reconstruction_error, Z)[0]
    grad_per_Z = tf.compat.v1.norm(grad_Z, axis=(1, 2))
    grad_norm = tf.compat.v1.reduce_mean(grad_per_Z)
    # solver = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(reconstruction_error, var_list=[Z])
    print('Finding latent state corresponding to samples...')

    with tf.compat.v1.Session() as sess:
        # graph = tf.compat.v1.Graph()
        # graphDef = graph.as_graph_def()
        sess.run(tf.compat.v1.global_variables_initializer())
        error = sess.run(reconstruction_error, feed_dict=fd)
        g_n = sess.run(grad_norm, feed_dict=fd)
        # print(g_n)
        i = 0
        if n_iter is not None:
            while i < n_iter:
                _ = sess.run(solver, feed_dict=fd)
                error = sess.run(reconstruction_error, feed_dict=fd)
                i += 1
        else:
            if g_tolerance is not None:
                while g_n > g_tolerance:
                    _ = sess.run(solver, feed_dict=fd)
                    error, g_n = sess.run([reconstruction_error, grad_norm],
                                          feed_dict=fd)
                    i += 1
                    print(error, g_n)
                    if i > max_iter:
                        break
            else:
                while np.abs(error) > e_tolerance:
                    # while ((np.abs(error) > e_tolerance) or (math.isnan(error))):
                    solver.run(feed_dict=fd)
                    # error = sess.run(reconstruction_error, feed_dict=fd)

                    reconstruction_error_out, reconstruction_error_per_sample_out, similarity_per_sample_out, Kxy_out, G_samples_out, samples_out, Z_out, X_sqnorms_out, Y_sqnorms_out, XY_out, sigmas_out = sess.run(
                        [
                            reconstruction_error,
                            reconstruction_error_per_sample,
                            similarity_per_sample, Kxy, G_samples, samples, Z,
                            X_sqnorms, Y_sqnorms, XY, sigmas
                        ],
                        feed_dict=fd)
                    if math.isnan(reconstruction_error_out):
                        print("nan")
                    # update the loop variable so the e_tolerance check sees the new value
                    error = reconstruction_error_out

                    # Zs, Gs, Kxy_s, error_per_sample, error = sess.run([Z, G_samples, Kxy, reconstruction_error_per_sample, reconstruction_error], feed_dict=fd)
                    # Zs, Gs, Kxy_s, error_per_sample, error, _ = sess.run([Z, G_samples, Kxy, reconstruction_error_per_sample, reconstruction_error, solver], feed_dict=fd)
                    i += 1
                    # print(error)
                    if i > max_iter:
                        break
        Zs = sess.run(Z, feed_dict=fd)
        Gs = sess.run(G_samples, feed_dict={Z: Zs})
        error_per_sample = sess.run(reconstruction_error_per_sample,
                                    feed_dict=fd)
        print('Z found in', i, 'iterations with final reconstruction error of',
              error)
    tf.compat.v1.reset_default_graph()

    return Gs, Zs, error_per_sample, heuristic_sigma
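
The kernel arithmetic inlined above can be checked in isolation. Below is a minimal NumPy sketch (not part of the original code) of the single-sigma RBF similarity and the resulting reconstruction error for one pair of samples; the shapes and the sigma value are illustrative assumptions.

import numpy as np

def rbf_similarity(x, y, sigma):
    # x, y: arrays of shape (seq_length, num_features); sigma: positive scalar.
    # Mirrors the rank-3 tensordot branch above for a single pair of samples:
    # k(x, y) = exp(-||x - y||_F^2 / (2 * sigma^2))
    xx = np.sum(x * x)
    yy = np.sum(y * y)
    xy = np.sum(x * y)
    gamma = 1.0 / (2.0 * sigma ** 2)
    return np.exp(-gamma * (-2.0 * xy + xx + yy))

x = np.random.randn(16, 1).astype(np.float32)  # stand-in for one G_samples sequence
y = np.random.randn(16, 1).astype(np.float32)  # stand-in for one target sequence
sigma = 1.0                                    # stand-in for heuristic_sigma
similarity = rbf_similarity(x, y, sigma)
print('similarity:', similarity, 'reconstruction error:', 1.0 - similarity)
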
Example #2
def invert2(settings,
            samples,
            para_path,
            g_tolerance=None,
            e_tolerance=0.1,
            n_iter=None,
            max_iter=10000,
            heuristic_sigma=None):
    """
    Return the latent-space points corresponding to a set of samples (found via gradient descent).
    Note: this function is designed to invert ONE sample at a time.
    """
    # num_samples = samples.shape[0]
    # cast samples to float32

    samples = np.float32(samples)

    # get the model
    # if settings is a string, assume it's an identifier and load
    if isinstance(settings, str):
        settings = json.load(
            open('./experiments/settings/' + settings + '.txt', 'r'))

    # get parameters
    parameters = model.load_parameters(para_path)
    Z = tf.compat.v1.get_variable(
        name='Z',
        shape=[1, settings['seq_length'], settings['latent_dim']],
        initializer=tf.compat.v1.random_normal_initializer())
    # create outputs
    G_samples = generator_o(Z,
                            settings['hidden_units_g'],
                            settings['seq_length'],
                            1,
                            settings['num_generated_features'],
                            reuse=False,
                            parameters=parameters)
    # generator_vars = ['hidden_units_g', 'seq_length', 'batch_size', 'num_generated_features', 'cond_dim', 'learn_scale']
    # generator_settings = dict((k, settings[k]) for k in generator_vars)
    # G_samples = model.generator(Z, **generator_settings, reuse=True)
    # G_samples = model.generator(Z, settings['hidden_units_g'], settings['seq_length'], 1, settings['num_generated_features'], reuse=False, parameters=parameters)

    fd = None

    # define the MMD-based loss
    if heuristic_sigma is None:
        heuristic_sigma = mmd.median_pairwise_distance_o(
            samples)  # this is noisy
        print('heuristic_sigma:', heuristic_sigma)
    samples = tf.compat.v1.reshape(
        samples,
        [1, settings['seq_length'], settings['num_generated_features']])
    Kxx, Kxy, Kyy, wts = mmd._mix_rbf_kernel(G_samples,
                                             samples,
                                             sigmas=tf.constant(
                                                 value=heuristic_sigma,
                                                 shape=(1, 1)))
    similarity_per_sample = tf.compat.v1.diag_part(Kxy)
    reconstruction_error_per_sample = 1 - similarity_per_sample
    # reconstruction_error_per_sample = tf.reduce_sum((tf.nn.l2_normalize(G_samples, dim=1) - tf.nn.l2_normalize(samples, dim=1))**2, axis=[1,2])
    similarity = tf.compat.v1.reduce_mean(similarity_per_sample)
    reconstruction_error = 1 - similarity
    # updater
    # solver = tf.compat.v1.train.AdamOptimizer().minimize(reconstruction_error_per_sample, var_list=[Z])
    # solver = tf.train.RMSPropOptimizer(learning_rate=500).minimize(reconstruction_error, var_list=[Z])
    solver = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.1).minimize(
        reconstruction_error_per_sample, var_list=[Z])
    # solver = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9).minimize(reconstruction_error_per_sample, var_list=[Z])

    grad_Z = tf.compat.v1.gradients(reconstruction_error_per_sample, Z)[0]
    grad_per_Z = tf.compat.v1.norm(grad_Z, axis=(1, 2))
    grad_norm = tf.compat.v1.reduce_mean(grad_per_Z)
    # solver = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(reconstruction_error, var_list=[Z])
    print('Finding latent state corresponding to samples...')

    with tf.compat.v1.Session() as sess:
        # graph = tf.compat.v1.Graph()
        # graphDef = graph.as_graph_def()
        sess.run(tf.compat.v1.global_variables_initializer())
        error = sess.run(reconstruction_error, feed_dict=fd)
        g_n = sess.run(grad_norm, feed_dict=fd)
        # print(g_n)
        i = 0
        if n_iter is not None:
            while i < n_iter:
                _ = sess.run(solver, feed_dict=fd)
                error = sess.run(reconstruction_error, feed_dict=fd)
                i += 1
        else:
            if g_tolerance is not None:
                while g_n > g_tolerance:
                    _ = sess.run(solver, feed_dict=fd)
                    error, g_n = sess.run([reconstruction_error, grad_norm],
                                          feed_dict=fd)
                    i += 1
                    print(error, g_n)
                    if i > max_iter:
                        break
            else:
                while np.abs(error) > e_tolerance:
                    _ = sess.run(solver, feed_dict=fd)
                    error = sess.run(reconstruction_error, feed_dict=fd)
                    i += 1
                    # print(error)
                    if i > max_iter:
                        break
        Zs = sess.run(Z, feed_dict=fd)
        Gs = sess.run(G_samples, feed_dict={Z: Zs})
        error_per_sample = sess.run(reconstruction_error_per_sample,
                                    feed_dict=fd)
        print('Z found in', i, 'iterations with final reconstruction error of',
              error)
    tf.compat.v1.reset_default_graph()

    return Gs, Zs, error_per_sample, heuristic_sigma
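
For orientation, a hypothetical call site for invert2 might look as follows; the experiment identifier, parameter path, and sample shape are placeholders, not values taken from this code, and must match an actual experiment setup.

import numpy as np

# 'my_experiment' is a placeholder assumed to name ./experiments/settings/my_experiment.txt,
# and the parameter path below is a placeholder for whatever model.load_parameters expects.
sample = np.random.randn(30, 1)  # one sequence: (seq_length, num_generated_features)
Gs, Zs, error_per_sample, sigma = invert2('my_experiment',
                                          sample,
                                          './experiments/parameters/my_experiment.npy',
                                          e_tolerance=0.1,
                                          max_iter=10000)
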
Example #3
        fd = None
        Zs = tf.get_variable(name='Zs',
                             shape=[batch_size, seq_length, latent_dim],
                             initializer=tf.random_normal_initializer())
        print('Zs: {}'.format(Zs.shape))
        sess.run(tf.global_variables_initializer())
        Z_latent = sess.run(Zs, feed_dict=fd)
        # Zs = model.sample_Z(batch_size, seq_length, latent_dim, use_time)
        # create outputs
        gs_sample = sess.run(G_sample, feed_dict={Z: Z_latent})
        gs_sample = np.float32(gs_sample)
        # gs_sample = model.generator(Zs, **generator_settings, reuse=True, c=CG)

        # define the MMD-based loss
        heuristic_sigma = mmd.median_pairwise_distance_o(
            ts_sample)  # this is noisy
        print('heuristic_sigma:', heuristic_sigma)
        Kxx, Kxy, Kyy, wts = mmd._mix_rbf_kernel(gs_sample,
                                                 ts_sample,
                                                 sigmas=tf.constant(
                                                     value=heuristic_sigma,
                                                     shape=(1, 1)))
        similarity_per_sample = tf.diag_part(Kxy)
        reconstruction_error_per_sample = 1 - similarity_per_sample
        similarity = tf.reduce_mean(similarity_per_sample)
        reconstruction_error = 1 - similarity

        # updater
        # from differential_privacy.dp_sgd.dp_optimizer import dp_optimizer
        # from differential_privacy.dp_sgd.dp_optimizer import sanitizer
        # from differential_privacy.privacy_accountant.tf import accountant
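
All three examples set the kernel bandwidth from mmd.median_pairwise_distance_o, i.e. the median pairwise distance heuristic. A minimal NumPy sketch of that idea follows; the actual repository implementation may differ in details such as flattening or subsampling.

import numpy as np

def median_pairwise_distance(samples):
    # samples: array of shape (num_samples, seq_length, num_features);
    # needs at least two samples to form a pair.
    # Flatten each sample and take the median Euclidean distance over all
    # distinct pairs -- the usual bandwidth heuristic for RBF/MMD kernels.
    flat = samples.reshape(len(samples), -1)
    sq_norms = np.sum(flat ** 2, axis=1)
    sq_dists = sq_norms[:, None] - 2.0 * flat @ flat.T + sq_norms[None, :]
    iu = np.triu_indices(len(flat), k=1)
    return float(np.median(np.sqrt(np.maximum(sq_dists[iu], 0.0))))

print('heuristic_sigma:', median_pairwise_distance(np.random.randn(8, 30, 1)))
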