Example #1
def architecture(self, input_tensor, noise='none', reuse=False):
    """Build the encoder/decoder. The input_tensor is the TF node for feeding in data."""
    # Encode the input, corrupt the code with the requested noise, then decode.
    encoder = models.build_encoder(input_tensor,
                                   self.architecture_args,
                                   reuse=reuse)
    noisy_encoder = self.build_noise(encoder, noise=noise, reuse=reuse)
    decoder = models.build_decoder(noisy_encoder,
                                   self.architecture_args,
                                   self.data_shape,
                                   reuse=reuse)
    return encoder, noisy_encoder, decoder
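The method above only wires the graph together; training happens elsewhere in the class. A minimal, hypothetical usage sketch (TF1 style; the instance name, noise mode, loss, and optimizer are assumptions, not part of the original):

# Hypothetical sketch: consume the returned triple with a reconstruction loss.
# `model` and `input_tensor` are assumed to already exist (class instance and input placeholder).
encoder, noisy_encoder, decoder = model.architecture(input_tensor, noise='none')
recon_loss = tf.reduce_mean(tf.square(decoder - input_tensor))  # assumes the decoder output matches the input shape
train_op = tf.train.AdamOptimizer(1e-3).minimize(recon_loss)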
Example #2
def generate_code():
    # Rebuild the decoder graph, then restore its trained weights from the latest checkpoint.
    z = tf.placeholder(tf.float32, shape=(1, 16), name='decoder_input')
    build_decoder(z, (256, 54))

    saver = tf.train.Saver()
    with tf.Session() as sess:

        # new_saver = tf.train.import_meta_graph('experiments/VAE_baseline/simple/model.ckpt-2871.meta')
        saver.restore(
            sess,
            tf.train.latest_checkpoint('experiments/VAE_baseline/simple'))

        def g(latent_rep=None):
            # Decode a latent vector; sample one from a standard normal if none is given.
            if latent_rep is None:
                latent_rep = np.random.normal(0, 1, 16)
            return sess.run(NAMES['decoder_output'],
                            feed_dict={
                                NAMES['decoder_input']:
                                np.reshape(latent_rep, (1, -1))
                            })

        # # latent_rep = np.zeros(16, dtype='float32')
        # latent_rep = np.array([-0.18552721,  0.17082049, 0.1876981, -0.17597072, -0.18672395,
        #  0.20680845, -0.18589398, -0.15539576, -0.17138806, -0.16285755,
        #  0.19973838, -0.20951441, -0.18218575,  0.15672302,  0.32241789,
        #  0.22256292], dtype='float32')
        generated = g()[0]
        # Greedy decoding: keep the most likely token at each sequence position.
        tokens = np.argmax(generated, axis=-1)

        def token_to_string(t):
            if t == 0:
                return ''
            return TOKEN_MAP[t - 1]

        text = ' '.join([token_to_string(t) for t in tokens])
        print(text)
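generate_code relies on two module-level lookups that are not shown: NAMES, which maps logical names to tensors (or tensor names) in the restored graph, and TOKEN_MAP, which maps token indices back to token strings. A sketch of their assumed shape (contents are illustrative, not taken from the original module):

# Assumed structure only; the real module defines these elsewhere.
NAMES = {
    'encoder_input': 'encoder_input:0',
    'decoder_input': 'decoder_input:0',
    'decoder_output': 'decoder_output:0',
}
TOKEN_MAP = [...]  # index -> token string; output index 0 is treated as padding/empty above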
Example #3
def main():
    # Create dataset
    content_ds = ImageDataset(CONTENT_DS_PATH, batch_size=BATCH_SIZE)
    style_ds = ImageDataset(STYLE_DS_PATH, batch_size=BATCH_SIZE)

    # Build model
    vgg19 = build_vgg19(INPUT_SHAPE, VGG_PATH)  # encoder
    decoder = build_decoder(vgg19.output.shape[1:])  # input shape == encoder output shape
    model = build_model(vgg19, decoder, INPUT_SHAPE)

    # model.load_weights(SAVE_PATH)  # optionally resume from previously saved weights

    # Get loss
    vgg19_relus = build_vgg19_relus(vgg19)
    loss = get_loss(vgg19, vgg19_relus, epsilon=EPSILON, style_weight=STYLE_WEIGHT, color_weight=COLOR_LOSS)

    # Train model
    train(model, content_ds, style_ds, loss, n_epochs=EPOCHS, save_path=SAVE_PATH)
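This style-transfer training script depends on module-level constants that are not shown. Plausible placeholder values (purely illustrative, not the original configuration):

# Illustrative configuration only; the original module defines its own values.
CONTENT_DS_PATH = 'data/content'    # hypothetical dataset paths
STYLE_DS_PATH = 'data/style'
VGG_PATH = 'weights/vgg19.h5'       # pretrained encoder weights
SAVE_PATH = 'weights/decoder.h5'
INPUT_SHAPE = (256, 256, 3)
BATCH_SIZE = 8
EPOCHS = 10
EPSILON = 1e-5
STYLE_WEIGHT = 10.0
COLOR_LOSS = 1.0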
Example #4
def make_simple(latent_dim, sequence_length):
    huzz = HuzzerSource()
    data_pipeline = OneHotVecotorizer(TokenDatasource(huzz), 54, 256)

    x_shape = (sequence_length, 54)
    encoder_input = tf.placeholder(tf.float32,
                                   shape=(1, *x_shape),
                                   name='encoder_input')
    x_flat = slim.flatten(encoder_input)
    z = slim.fully_connected(x_flat,
                             latent_dim,
                             scope='encoder_output',
                             activation_fn=tf.tanh)
    encoder_output = tf.identity(z, 'this_is_output')

    decoder_input = tf.placeholder(tf.float32,
                                   shape=(1, latent_dim),
                                   name='decoder_input')
    decoder_output = build_decoder(decoder_input, x_shape)
    return data_pipeline, encoder_input, encoder_output, decoder_input, decoder_output
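The tuple returned by make_simple is wired up the same way as in Example #6 below. A minimal hypothetical sketch (the checkpoint path and data key are assumptions):

# Hypothetical usage of make_simple's return values.
pipeline, enc_in, enc_out, dec_in, dec_out = make_simple(latent_dim=16, sequence_length=256)
with tf.Session() as sess:
    tf.train.Saver().restore(sess, tf.train.latest_checkpoint('experiments/VAE_baseline/simple'))
    example = np.reshape(pipeline['12345'], (1, 256, 54))    # one-hot token sequence
    latent = sess.run(enc_out, feed_dict={enc_in: example})  # encode
    recon = sess.run(dec_out, feed_dict={dec_in: latent})    # decode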
Example #5
def make_simple_sss(latent_dim=32):
    x_shape = (128, 54)
    huzz = HuzzerSource()
    data_pipeline = OneHotVecotorizer(TokenDatasource(huzz), x_shape[1],
                                      x_shape[0])

    encoder_input = tf.placeholder(tf.float32,
                                   shape=(1, *x_shape),
                                   name='encoder_input')
    x_flat = slim.flatten(encoder_input)
    z = slim.fully_connected(x_flat,
                             latent_dim,
                             scope='encoder_output',
                             activation_fn=tf.tanh)

    decoder_input = tf.placeholder(tf.float32,
                                   shape=(1, latent_dim),
                                   name='decoder_input')
    decoder_output = build_decoder(decoder_input,
                                   x_shape,
                                   activation=tf.nn.relu6)
    decoder_output = tf.reshape(decoder_output, x_shape)
    return data_pipeline, encoder_input, z, decoder_input, decoder_output
Example #6
def autoencode():
    huzz = HuzzerSource()

    data_pipeline = OneHotVecotorizer(TokenDatasource(huzz), 54, 256)

    x_shape = (256, 54)
    latent_dim = 16
    # Rebuild the encoder graph exactly as it was trained, so the checkpoint restores by name.
    x = tf.placeholder(tf.float32, shape=(1, *x_shape), name='encoder_input')
    x_flat = slim.flatten(x)
    z = slim.fully_connected(x_flat,
                             latent_dim,
                             scope='encoder_output',
                             activation_fn=tf.tanh)
    z = tf.identity(z, 'this_is_output')

    # Rebuild the decoder graph with its own input placeholder.
    decoder_input = tf.placeholder(tf.float32, shape=(1, latent_dim), name='decoder_input')
    build_decoder(decoder_input, x_shape)

    with tf.Session() as sess:

        saver = tf.train.Saver()
        saver.restore(
            sess,
            tf.train.latest_checkpoint('experiments/VAE_baseline/simple'))

        def a(example_data=None):
            # Encode an example; draw a random generated program from huzz if none is given.
            if example_data is None:
                key = str(randint(0, 100000000))
                code = huzz[key]
                print('INPUT CODE FOR NETWORK:')
                print(code)
                example_data = data_pipeline[key]
            example_data = np.reshape(example_data, (1, *example_data.shape))
            return sess.run(['this_is_output:0'],
                            feed_dict={NAMES['encoder_input']: example_data})

        def g(latent_rep):
            # Decode a latent representation back into token probabilities.
            return sess.run(NAMES['decoder_output'],
                            feed_dict={NAMES['decoder_input']: latent_rep[0]})

        for i in trange(5):

            key = str(randint(0, 100000000))
            code = huzz[key]
            with open(BASEDIR + 'simple_examples/auto_{}_input.hs'.format(i),
                      'w') as f:
                f.write(code)

            example_data = data_pipeline[key]
            imsave(BASEDIR + 'simple_examples/auto_{}_input.png'.format(i),
                   example_data.astype(np.float32).T)

            latent_reps = a(example_data)
            recon = g(latent_reps)[0]

            imsave(BASEDIR + 'simple_examples/auto_{}_output.png'.format(i),
                   recon.T)

            tokens = np.argmax(recon, axis=-1)

            def token_to_string(t):
                if t == 0:
                    return ''
                return TOKEN_MAP[t - 1]

            text = ' '.join([token_to_string(t) for t in tokens])
            with open(BASEDIR + 'simple_examples/auto_{}_output.hs'.format(i),
                      'w') as f:
                f.write(text)
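Beyond the project's own modules (HuzzerSource, OneHotVecotorizer, TokenDatasource, build_decoder, NAMES, TOKEN_MAP, BASEDIR), this example needs roughly the following imports; the exact sources are inferred, not shown in the original:

# Assumed imports for Example #6 (TF1-era stack).
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from random import randint        # numpy.random.randint would also work here
from tqdm import trange           # progress bar over the five samples
from scipy.misc import imsave     # removed in newer SciPy; imageio.imwrite is the usual replacement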