Example #1
from keras.layers import Input, Flatten, Dense
from keras.models import Model


def build_pred_model(n_tokens=4, seq_length=33, enc1_units=50, pred_var=0.1):
    """Returns a compiled feed-forward model mapping inputs of shape
    (seq_length, n_tokens) to a single scalar prediction."""
    x = Input(shape=(seq_length, n_tokens))
    h = Flatten()(x)
    h = Dense(enc1_units, activation='elu')(h)
    h = Dense(enc1_units, activation='elu')(h)
    out = Dense(1)(h)

    model = Model(inputs=[x], outputs=[out])
    # get_gaussian_nll is assumed to be defined elsewhere in the project; it
    # should return a Keras loss computing a Gaussian negative log-likelihood
    # with the given (fixed) variance.
    model.compile(optimizer='adam',
                  loss=[get_gaussian_nll(pred_var)],
                  metrics=['mse'])
    return model
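
A minimal usage sketch (not part of the source): it assumes get_gaussian_nll returns an ordinary Keras loss function and feeds the model random one-hot sequences matching the default shapes above.

import numpy as np

model = build_pred_model()
X = np.eye(4)[np.random.randint(0, 4, size=(128, 33))]  # (128, 33, 4) one-hot sequences
y = np.random.randn(128, 1)                              # scalar regression targets
model.fit(X, y, epochs=2, batch_size=32)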
Example #2
from keras import backend as K
from keras.layers import Dense, Reshape


def build_pred_vae_model(latent_dim,
                         n_tokens=4,
                         seq_length=33,
                         enc1_units=50,
                         eps_std=1.,
                         pred_var=0.1,
                         learn_uncertainty=False):
    """Returns a compiled VAE that makes supervised predictions from its latent
    space. For use with the Gomez-Bombarelli optimization method."""
    # SimpleSupervisedVAE and the loss helpers (summed_categorical_crossentropy,
    # identity_loss, get_gaussian_nll, zero_loss) are assumed to be defined
    # elsewhere in the project.
    model = SimpleSupervisedVAE(input_shape=(seq_length, n_tokens),
                                latent_dim=latent_dim,
                                pred_dim=1,
                                pred_var=pred_var,
                                learn_uncertainty=learn_uncertainty)

    # set encoder layers:
    model.encoderLayers_ = [
        Dense(units=enc1_units, activation='elu', name='e2'),
    ]

    # set decoder layers:
    model.decoderLayers_ = [
        Dense(units=enc1_units, activation='elu', name='d1'),
        Dense(units=n_tokens * seq_length, name='d3'),
        Reshape((seq_length, n_tokens), name='d4'),
        Dense(units=n_tokens, activation='softmax', name='d5'),
    ]

    # set predictor layers:
    model.predictorLayers_ = [
        Dense(units=20, activation='elu', name='p1'),
    ]

    # build models:
    kl_scale = K.variable(1.)
    model.build_encoder()
    model.build_decoder(decode_activation='softmax')
    model.build_predictor()
    model.build_vae(epsilon_std=eps_std, kl_scale=kl_scale)

    # the VAE's fourth output is its predicted log-variance; exponentiate it
    # to obtain the variance fed to the Gaussian NLL prediction loss
    y_var = K.exp(model.vae_.outputs[3])

    # one loss per VAE output: sequence reconstruction, the latent/KL term
    # (passed through identity_loss), the supervised prediction, and a zero
    # loss on the log-variance output
    losses = [
        summed_categorical_crossentropy, identity_loss,
        get_gaussian_nll(y_var), zero_loss
    ]

    model.compile(optimizer='adam', loss=losses, metrics=['mse'])
    return model
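
Both examples depend on get_gaussian_nll, which is defined elsewhere in the project. A plausible sketch of such a loss factory, assuming it closes over a variance (a fixed scalar or a tensor) and returns a Keras loss computing the Gaussian negative log-likelihood; this is an illustration, not the project's actual implementation.

import numpy as np
from keras import backend as K

def get_gaussian_nll(variance):
    """Hypothetical sketch: Keras loss computing the Gaussian negative
    log-likelihood 0.5 * [log(2*pi*variance) + (y_true - y_pred)**2 / variance]."""
    def nll(y_true, y_pred):
        return K.mean(0.5 * (K.log(2 * np.pi * variance)
                             + K.square(y_true - y_pred) / variance),
                      axis=-1)
    return nll

Written this way it accepts both the fixed pred_var of Example #1 and the tensor-valued y_var of Example #2, since the backend ops handle Python scalars and tensors alike.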