Example #1
def invert2(settings,
            samples,
            para_path,
            g_tolerance=None,
            e_tolerance=0.1,
            n_iter=None,
            max_iter=10000,
            heuristic_sigma=None):
    """
    Return the latent space points corresponding to a set of a samples (from gradient descent)
    Note: this function is designed for ONE sample generation
    """
    # num_samples = samples.shape[0]
    # cast samples to float32

    samples = np.float32(samples)

    # get the model
    # if settings is a string, assume it's an identifier and load
    if isinstance(settings, str):
        settings = json.load(
            open('./experiments/settings/' + settings + '.txt', 'r'))

    # get parameters
    parameters = model.load_parameters(para_path)
    Z = tf.compat.v1.get_variable(
        name='Z',
        shape=[1, settings['seq_length'], settings['latent_dim']],
        initializer=tf.compat.v1.random_normal_initializer())
    # create outputs
    G_samples = generator_o(Z,
                            settings['hidden_units_g'],
                            settings['seq_length'],
                            1,
                            settings['num_generated_features'],
                            reuse=False,
                            parameters=parameters)
    # generator_vars = ['hidden_units_g', 'seq_length', 'batch_size', 'num_generated_features', 'cond_dim', 'learn_scale']
    # generator_settings = dict((k, settings[k]) for k in generator_vars)
    # G_samples = model.generator(Z, **generator_settings, reuse=True)
    # G_samples = model.generator(Z, settings['hidden_units_g'], settings['seq_length'], 1, settings['num_generated_features'], reuse=False, parameters=parameters)

    fd = None

    # define the MMD-based loss
    if heuristic_sigma is None:
        heuristic_sigma = mmd.median_pairwise_distance_o(
            samples)  # this is noisy
        print('heuristic_sigma:', heuristic_sigma)
    samples = tf.compat.v1.reshape(
        samples,
        [1, settings['seq_length'], settings['num_generated_features']])
    Kxx, Kxy, Kyy, wts = mmd._mix_rbf_kernel(G_samples,
                                             samples,
                                             sigmas=tf.constant(
                                                 value=heuristic_sigma,
                                                 shape=(1, 1)))
    similarity_per_sample = tf.compat.v1.diag_part(Kxy)
    reconstruction_error_per_sample = 1 - similarity_per_sample
    # reconstruction_error_per_sample = tf.reduce_sum((tf.nn.l2_normalize(G_samples, dim=1) - tf.nn.l2_normalize(samples, dim=1))**2, axis=[1,2])
    similarity = tf.compat.v1.reduce_mean(similarity_per_sample)
    reconstruction_error = 1 - similarity
    # updater
    # solver = tf.compat.v1.train.AdamOptimizer().minimize(reconstruction_error_per_sample, var_list=[Z])
    # solver = tf.train.RMSPropOptimizer(learning_rate=500).minimize(reconstruction_error, var_list=[Z])
    solver = tf.compat.v1.train.RMSPropOptimizer(learning_rate=0.1).minimize(
        reconstruction_error_per_sample, var_list=[Z])
    # solver = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9).minimize(reconstruction_error_per_sample, var_list=[Z])

    grad_Z = tf.compat.v1.gradients(reconstruction_error_per_sample, Z)[0]
    grad_per_Z = tf.compat.v1.norm(grad_Z, axis=(1, 2))
    grad_norm = tf.compat.v1.reduce_mean(grad_per_Z)
    # solver = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(reconstruction_error, var_list=[Z])
    print('Finding latent state corresponding to samples...')

    # run the optimisation inside a single session
    with tf.compat.v1.Session() as sess:
        # graph = tf.compat.v1.Graph()
        # graphDef = graph.as_graph_def()
        sess.run(tf.compat.v1.global_variables_initializer())
        error = sess.run(reconstruction_error, feed_dict=fd)
        g_n = sess.run(grad_norm, feed_dict=fd)
        # print(g_n)
        i = 0
        if n_iter is not None:
            while i < n_iter:
                _ = sess.run(solver, feed_dict=fd)
                error = sess.run(reconstruction_error, feed_dict=fd)
                i += 1
        else:
            if g_tolerance is not None:
                while g_n > g_tolerance:
                    _ = sess.run(solver, feed_dict=fd)
                    error, g_n = sess.run([reconstruction_error, grad_norm],
                                          feed_dict=fd)
                    i += 1
                    print(error, g_n)
                    if i > max_iter:
                        break
            else:
                while np.abs(error) > e_tolerance:
                    _ = sess.run(solver, feed_dict=fd)
                    error = sess.run(reconstruction_error, feed_dict=fd)
                    i += 1
                    # print(error)
                    if i > max_iter:
                        break
        Zs = sess.run(Z, feed_dict=fd)
        Gs = sess.run(G_samples, feed_dict={Z: Zs})
        error_per_sample = sess.run(reconstruction_error_per_sample,
                                    feed_dict=fd)
        print('Z found in', i, 'iterations with final reconstruction error of',
              error)
    tf.compat.v1.reset_default_graph()

    return Gs, Zs, error_per_sample, heuristic_sigma
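
A minimal usage sketch for invert2 (hypothetical: the identifier 'my_experiment', the parameter path, and the sample shape below are placeholders, not values from the original project):

# Hypothetical call: the settings file ./experiments/settings/my_experiment.txt and the
# saved parameters at para_path are assumed to exist.
sample = np.float32(np.sin(np.linspace(0, 6.28, 30)).reshape(30, 1))  # one (seq_length, features) series
Gs, Zs, error_per_sample, sigma = invert2('my_experiment',
                                          sample,
                                          './experiments/parameters/my_experiment_epoch50.npy',
                                          e_tolerance=0.05,
                                          max_iter=5000)
print('reconstruction error:', error_per_sample)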
Example #2
def invert(settings, epoch, samples, g_tolerance=None, e_tolerance=0.1,
           n_iter=None, max_iter=10000, heuristic_sigma=None, C_samples=None):
    """
    Return the latent space points corresponding to a set of a samples
    ( from gradient descent )
    """
    # cast samples to float32
    samples = np.float32(samples[:, :, :])
    # get the model
    if isinstance(settings, str):
        settings = json.load(open('./experiments/settings/' + settings + '.txt', 'r'))
    num_samples = samples.shape[0]
    print('Inverting', num_samples, 'samples using model',
          settings['identifier'], 'at epoch', epoch)
    if g_tolerance is not None:
        print('until gradient norm is below', g_tolerance)
    else:
        print('until error is below', e_tolerance)
    # get parameters
    parameters = load_parameters(settings['identifier'] + '_' + str(epoch))
    # assertions
    assert samples.shape[2] == settings['num_generated_features']
    # create VARIABLE Z
    Z = tf.get_variable(name='Z', shape=[num_samples, settings['seq_length'],
                        settings['latent_dim']],
                        initializer=tf.random_normal_initializer())
    if C_samples is None:
        # create outputs
        G_samples = generator(Z, settings['hidden_units_g'], settings['seq_length'],
                              num_samples, settings['num_generated_features'],
                              reuse=False, parameters=parameters)
        fd = None
    else:
        CG = tf.placeholder(tf.float32, [num_samples, settings['cond_dim']])
        assert C_samples.shape[0] == samples.shape[0]
        # CGAN
        G_samples = generator(Z, settings['hidden_units_g'], settings['seq_length'], 
                              num_samples, settings['num_generated_features'], 
                              reuse=False, parameters=parameters, cond_dim=settings['cond_dim'], c=CG)
        fd = {CG: C_samples}

    # define loss
    if heuristic_sigma is None:
        heuristic_sigma = mmd.median_pairwise_distance(samples)     # this is noisy
        print('heuristic_sigma:', heuristic_sigma)
    Kxx, Kxy, Kyy, wts = mmd._mix_rbf_kernel(G_samples, samples, sigmas=tf.constant(value=heuristic_sigma, shape=(1, 1)))
    similarity_per_sample = tf.diag_part(Kxy)
    reconstruction_error_per_sample = 1 - similarity_per_sample
    #reconstruction_error_per_sample = tf.reduce_sum((tf.nn.l2_normalize(G_samples, dim=1) - tf.nn.l2_normalize(samples, dim=1))**2, axis=[1,2])
    similarity = tf.reduce_mean(similarity_per_sample)
    reconstruction_error = 1 - similarity
    # updater
    # solver = tf.train.AdamOptimizer().minimize(reconstruction_error_per_sample, var_list=[Z])
    # solver = tf.train.RMSPropOptimizer(learning_rate=500).minimize(reconstruction_error, var_list=[Z])
    solver = tf.train.RMSPropOptimizer(learning_rate=0.1).minimize(reconstruction_error_per_sample, var_list=[Z])
    #solver = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9).minimize(reconstruction_error_per_sample, var_list=[Z])

    grad_Z = tf.gradients(reconstruction_error_per_sample, Z)[0]
    grad_per_Z = tf.norm(grad_Z, axis=(1, 2))
    grad_norm = tf.reduce_mean(grad_per_Z)
    #solver = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(reconstruction_error, var_list=[Z])
    print('Finding latent state corresponding to samples...')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        error = sess.run(reconstruction_error, feed_dict=fd)
        g_n = sess.run(grad_norm, feed_dict=fd)
        print(g_n)
        i = 0
        if n_iter is not None:
            while i < n_iter:
                _ = sess.run(solver, feed_dict=fd)
                error = sess.run(reconstruction_error, feed_dict=fd)
                i += 1
        else:
            if g_tolerance is not None:
                while g_n > g_tolerance:
                    _ = sess.run(solver, feed_dict=fd)
                    error, g_n = sess.run([reconstruction_error, grad_norm], feed_dict=fd)
                    i += 1
                    print(error, g_n)
                    if i > max_iter:
                        break
            else:
                while np.abs(error) > e_tolerance:
                    _ = sess.run(solver, feed_dict=fd)
                    error = sess.run(reconstruction_error, feed_dict=fd)
                    i += 1
                    print(error)
                    if i > max_iter:
                        break
        Zs = sess.run(Z, feed_dict=fd)
        error_per_sample = sess.run(reconstruction_error_per_sample, feed_dict=fd)
        print('Z found in', i, 'iterations with final reconstruction error of', error)
    tf.reset_default_graph()
    return Zs, error_per_sample, heuristic_sigma
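
A minimal usage sketch for invert, including the conditional (CGAN) branch (hypothetical identifier, epoch, and shapes; C_samples must hold one row of conditioning information per sample):

# Hypothetical call: a model saved under '<identifier>_<epoch>' is assumed to exist,
# with seq_length=30, num_generated_features=1 and cond_dim=4 in its settings file.
samples = np.random.randn(16, 30, 1).astype(np.float32)    # (num_samples, seq_length, features)
conditions = np.eye(4)[np.random.randint(0, 4, size=16)]   # one-hot (num_samples, cond_dim)
Zs, error_per_sample, sigma = invert('my_experiment', 50, samples,
                                     e_tolerance=0.05, C_samples=conditions)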
Example #3
        print('Zs:{}'.format(Zs))
        sess.run(tf.global_variables_initializer())
        Z_latent = sess.run(Zs, feed_dict=fd)
        # Zs = model.sample_Z(batch_size, seq_length, latent_dim, use_time)
        # create outputs
        gs_sample = sess.run(G_sample, feed_dict={Z: Z_latent})
        gs_sample = np.float32(gs_sample[:, :, :])
        # gs_sample = model.generator(Zs, **generator_settings, reuse=True, c=CG)

        # define the MMD-based loss
        heuristic_sigma = mmd.median_pairwise_distance_o(
            ts_sample)  # this is noisy
        print('heuristic_sigma:', heuristic_sigma)
        Kxx, Kxy, Kyy, wts = mmd._mix_rbf_kernel(gs_sample,
                                                 ts_sample,
                                                 sigmas=tf.constant(
                                                     value=heuristic_sigma,
                                                     shape=(1, 1)))
        similarity_per_sample = tf.diag_part(Kxy)
        reconstruction_error_per_sample = 1 - similarity_per_sample
        similarity = tf.reduce_mean(similarity_per_sample)
        reconstruction_error = 1 - similarity

        # updater
        # from differential_privacy.dp_sgd.dp_optimizer import dp_optimizer
        # from differential_privacy.dp_sgd.dp_optimizer import sanitizer
        # from differential_privacy.privacy_accountant.tf import accountant

        # solver = tf.train.AdamOptimizer().minimize(reconstruction_error_per_sample, var_list=[Zs])
        # solver = tf.train.RMSPropOptimizer(learning_rate=500).minimize(reconstruction_error, var_list=[Zs])
        # solver = tf.train.RMSPropOptimizer(learning_rate=0.1).minimize(reconstruction_error_per_sample, var_list=Zs)
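
All three examples score reconstructions with the diagonal of the RBF cross-kernel Kxy, so each sample's error is 1 minus its kernel similarity to the sample it reconstructs. A plain NumPy sketch of that idea (illustrative only, not the project's mmd module; the function name and shapes are assumptions):

import numpy as np

def rbf_cross_similarity(generated, real, sigma):
    """Per-sample RBF similarity exp(-||g_i - x_i||^2 / (2 * sigma^2)) on flattened series."""
    g = generated.reshape(len(generated), -1)
    x = real.reshape(len(real), -1)
    sq_dist = np.sum((g - x) ** 2, axis=1)        # squared distances = diagonal of the cross-kernel
    return np.exp(-sq_dist / (2.0 * sigma ** 2))

gen = np.random.randn(4, 30, 1).astype(np.float32)
real = np.random.randn(4, 30, 1).astype(np.float32)
similarity = rbf_cross_similarity(gen, real, sigma=1.0)     # sigma would come from the median heuristic
reconstruction_error_per_sample = 1.0 - similarity          # same convention as the examples above
print(reconstruction_error_per_sample)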