def make_special_conv4_l1(latent_dim, filter_length=3, num_filters=256):
    """Build the special_conv4 (L1 variant) autoencoder graph.

    Returns a 5-tuple: (data_pipeline, encoder_input, encoder_output,
    decoder_input, decoder_output).
    """
    x_shape = (128, 54)
    # Pipeline yields one-hot token matrices; note the (54, 128) arg order.
    data_pipeline = OneHotVecotorizer(
        TokenDatasource(HuzzerSource()), x_shape[1], x_shape[0])

    decoder_input = tf.placeholder(
        tf.float32, shape=(1, latent_dim), name='decoder_input')
    encoder_input = tf.placeholder(
        tf.float32, shape=(1, *x_shape), name='encoder_input')

    with conv_arg_scope2():
        encoder_output, _, dense_layer_size = build_special_conv4_encoder(
            encoder_input, latent_dim, num_filters,
            filter_length=filter_length)
        raw_decoded = build_special_conv4_decoder(
            decoder_input, x_shape, num_filters,
            filter_length=filter_length,
            dense_layer_size=dense_layer_size)
        # Per-token softmax over the vocab axis, batch dimension stripped.
        decoder_output = tf.squeeze(tf.nn.softmax(raw_decoded, dim=-1), 0)

    return data_pipeline, encoder_input, encoder_output, decoder_input, decoder_output
# NOTE(review): removed stray scrape artifact ("Esempio n. 2" / "0") — it was
# not Python and broke the module's syntax.
def test_one_hot():
    """One-hot vectorizer output is deterministic and correctly shaped."""
    expected_shape = (256, 54)
    vectorizer = OneHotVecotorizer(TokenDatasource(HuzzerSource()), 54, 256)

    # Check determinism and dimensions over a range of keys.
    for key in (str(i) for i in range(20, 60)):
        first = vectorizer[key]
        second = vectorizer[key]
        # Same key must always yield the same array.
        assert np.array_equal(first, second), \
            'Accessing same element produces different outcome.'
        assert first.shape == expected_shape, 'Incorrect shape for output.'
def make_conv1():
    """Build the conv1 autoencoder graph with a fixed 16-dim latent space.

    Returns a 5-tuple: (data_pipeline, encoder_input, encoder_output,
    decoder_input, decoder_output).
    """
    latent_dim = 16
    x_shape = (128, 54)
    data_pipeline = OneHotVecotorizer(
        TokenDatasource(HuzzerSource()), x_shape[1], x_shape[0])

    decoder_input = tf.placeholder(
        tf.float32, shape=(1, latent_dim), name='decoder_input')

    with conv_arg_scope():
        encoder_input = tf.placeholder(
            tf.float32, shape=(1, *x_shape), name='encoder_input')
        encoder_output, _ = build_conv1_encoder(encoder_input, latent_dim)
        # Reshape drops the batch dimension so output matches x_shape.
        decoder_output = tf.reshape(
            build_conv1_decoder(decoder_input, x_shape), x_shape)

    return data_pipeline, encoder_input, encoder_output, decoder_input, decoder_output
def make_simple(latent_dim, sequence_length):
    """Build a single-dense-layer autoencoder over one-hot token sequences.

    Returns a 5-tuple: (data_pipeline, encoder_input, encoder_output,
    decoder_input, decoder_output).
    """
    data_pipeline = OneHotVecotorizer(
        TokenDatasource(HuzzerSource()), 54, 256)

    x_shape = (sequence_length, 54)
    encoder_input = tf.placeholder(
        tf.float32, shape=(1, *x_shape), name='encoder_input')

    # Encoder: flatten, then one tanh-activated dense layer to the latent code.
    flattened = slim.flatten(encoder_input)
    latent = slim.fully_connected(
        flattened, latent_dim,
        scope='encoder_output', activation_fn=tf.tanh)
    encoder_output = tf.identity(latent, 'this_is_output')

    decoder_input = tf.placeholder(
        tf.float32, shape=(1, latent_dim), name='decoder_input')
    decoder_output = build_decoder(decoder_input, x_shape)

    return data_pipeline, encoder_input, encoder_output, decoder_input, decoder_output
def make_special_conv():
    """Build the special_conv autoencoder graph with a 64-dim latent space.

    Returns a 5-tuple: (data_pipeline, encoder_input, encoder_output,
    decoder_input, decoder_output).
    """
    latent_dim = 64
    x_shape = (128, 54)
    data_pipeline = OneHotVecotorizer(
        TokenDatasource(HuzzerSource()), x_shape[1], x_shape[0])

    decoder_input = tf.placeholder(
        tf.float32, shape=(1, latent_dim), name='decoder_input')
    encoder_input = tf.placeholder(
        tf.float32, shape=(1, *x_shape), name='encoder_input')

    with conv_arg_scope():
        encoder_output, _ = build_special_conv_encoder(
            encoder_input, latent_dim)
        logits = build_special_conv_decoder(decoder_input, x_shape)
        # Token probabilities with the batch dimension removed.
        decoder_output = tf.squeeze(tf.nn.softmax(logits, dim=-1), 0)

    return data_pipeline, encoder_input, encoder_output, decoder_input, decoder_output
def make_simple_sss(latent_dim=32):
    """Build a dense autoencoder (relu6 decoder) over 128x54 one-hot inputs.

    Returns a 5-tuple: (data_pipeline, encoder_input, latent,
    decoder_input, decoder_output). Note the raw latent tensor is
    returned as the encoder output (no tf.identity alias).
    """
    x_shape = (128, 54)
    data_pipeline = OneHotVecotorizer(
        TokenDatasource(HuzzerSource()), x_shape[1], x_shape[0])

    encoder_input = tf.placeholder(
        tf.float32, shape=(1, *x_shape), name='encoder_input')

    # Encoder: flatten, then one tanh-activated dense layer to the latent code.
    flattened = slim.flatten(encoder_input)
    latent = slim.fully_connected(
        flattened, latent_dim,
        scope='encoder_output', activation_fn=tf.tanh)

    decoder_input = tf.placeholder(
        tf.float32, shape=(1, latent_dim), name='decoder_input')
    reconstruction = build_decoder(
        decoder_input, x_shape, activation=tf.nn.relu6)
    # Drop the batch dimension so the reconstruction matches x_shape.
    decoder_output = tf.reshape(reconstruction, x_shape)

    return data_pipeline, encoder_input, latent, decoder_input, decoder_output