Example #1
#%% Imports and schedules (assumed: `model`, `train`, and
#   `exponential_multiplicative_cooling` come from this repo, and `m1` is a
#   previously constructed Gmvae instance)
import numpy as np
import pandas as pd

# Constant KL-weight schedules (no annealing), mirroring Example #2 below.
z_cooling = lambda: 1.0
y_cooling = lambda: 1.0
d_cooling = lambda: 1.0

#%% Train the model
# with tf.device('/gpu:0'):
train(
    m1,
    X_train,
    y_train,
    X_test,
    y_test,
    num=100,
    samples=1,
    epochs=1000,
    iter_train=1,
    num_inference=100,
    save="model_w",
    batch=True,
    temperature_function=lambda x: exponential_multiplicative_cooling(x, 1.0, 0.5, 0.99),
    save_results="./gumblevae_results.txt",
    beta_z_method=z_cooling,
    beta_y_method=y_cooling,
    beta_d_method=d_cooling,
)
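
# Note: exponential_multiplicative_cooling(x, 1.0, 0.5, 0.99) presumably anneals
# the Gumbel-Softmax temperature multiplicatively (factor 0.99 per step) from
# 1.0 toward a floor of 0.5; the exact signature is repo-specific.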


#%%
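# Sample 1000 one-hot draws from q(y|x) for a single training example at a
# very low Gumbel-Softmax temperature (0.005) to probe the categorical posterior.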
qy_g_x__logit, qy_g_x__prob = m1.graph_qy_g_x(X_train[[9]])
qy_g_x__ohe = np.array(
    [m1.graph_qy_g_x_ohe(qy_g_x__prob, 0.005).numpy()[0] for _ in range(1000)]
)

#%%
logit_df = pd.DataFrame()
for col in range(qy_g_x__ohe.shape[1]):
    # Build a long-format frame: one row per sample, tagged with its category.
    temp_df = pd.DataFrame({"value": qy_g_x__ohe[:, col]})
    temp_df["category"] = col
    logit_df = pd.concat([logit_df, temp_df], ignore_index=True)
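
#%% Visualization sketch (assumes seaborn and matplotlib are available and
#   that the long-format `logit_df` built above is the intended plot input)
import matplotlib.pyplot as plt
import seaborn as sns

sns.boxplot(x="category", y="value", data=logit_df)
plt.xlabel("mixture category")
plt.ylabel("sampled q(y|x) one-hot value")
plt.show()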
Example #2
import ast

import numpy as np
import tensorflow as tf
from sklearn.metrics import adjusted_mutual_info_score

# Assumed repo-local imports: `model`, `train`, `purity_score`,
# `exponential_multiplicative_cooling`, and the HP_* hyperparameter keys.


def train_test_model(run_id, hparams, X_train, y_train, X_test, y_test):

    # hp.hparams(hparams)  # record the values used in this trial
    seed = hparams[HP_seed]
    tf.random.set_seed(seed)

    # Dimension hyperparameters arrive as literal strings such as
    # "((512, 512), 64)"; parse each once with ast.literal_eval, a safer
    # drop-in for eval on literals.
    encoder_dims = ast.literal_eval(hparams[HP_encoder_dims])
    mixture_dims = ast.literal_eval(hparams[HP_mixture_dims])

    params = {
        "components": hparams[HP_components],
        "input_dimension": X_train.shape[1],
        "embedding_dimensions": encoder_dims[0],
        "latent_dimensions": encoder_dims[1],
        "mixture_embedding_dimensions": mixture_dims[0],
        "mixture_latent_dimensions": mixture_dims[1],
        "embedding_activations": tf.nn.relu,
        "kind": "binary",
        "learning_rate": 1.0,
        "gradient_clip": None,
        "bn_before": True if hparams[HP_bn] == "before" else False,
        "bn_after": True if hparams[HP_bn] == "after" else False,
        "categorical_epsilon": 0.0,
        "reconstruction_epsilon": 0.0,
        "latent_epsilon": 0.0,
        "latent_prior_epsilon": 0.0,
        "z_kl_lambda": 1.0,
        "c_kl_lambda": 1.0,
        "cat_latent_bias_initializer": None,
        "connected_weights": hparams[HP_connected_weights],
        # "optimizer":tf.keras.optimizers.Adam(lr_schedule, epsilon=1e-16),
        "optimizer": tf.keras.optimizers.Adam(1e-3, epsilon=1e-16),
        "categorical_latent_embedding_dropout": 0.2,
        "mixture_latent_mu_embedding_dropout": 0.2,
        "mixture_latent_var_embedding_dropout": 0.2,
        "mixture_posterior_mu_dropout": 0.2,
        "mixture_posterior_var_dropout": 0.2,
        "recon_dropouut": 0.2,
        #'latent_fixed_var': 0.01,
    }

    # Constant KL-weight schedules (no annealing) for the z and y terms.
    z_cooling = lambda: 1.0
    y_cooling = lambda: 1.0

    m1 = model.Gmvae(**params)

    params["embedding_activations"] = "relu"
    params["optimizer"] = "adam_1e-3_1e-9"

    param_string = f"/seed__{seed}/" + "/".join(
        f"{k}_{v}" for k, v in params.items()
    )

    train(
        m1,
        X_train,
        y_train,
        X_test,
        y_test,
        num=100,
        samples=hparams[HP_samples],
        epochs=110,
        iter_train=1,
        num_inference=1000,
        save="model_w_5",
        batch=True,
        temperature_function=lambda x: exponential_multiplicative_cooling(
            x, 1.0, 0.5, 0.99),
        # temperature_function = lambda x: 0.1
        save_results="./gumble_results.txt",
        beta_z_method=z_cooling,
        beta_y_method=y_cooling,
        tensorboard=run_id,
    )

    # Hard cluster assignments: argmax over the categorical posterior q(y|x).
    idx_tr = m1.predict(X_train).numpy().argmax(1)
    idx_te = m1.predict(X_test).numpy().argmax(1)

    ami_tr = adjusted_mutual_info_score(y_train,
                                        idx_tr,
                                        average_method="arithmetic")
    ami_te = adjusted_mutual_info_score(y_test,
                                        idx_te,
                                        average_method="arithmetic")

    # Fraction of test points attached to the single largest cluster
    # (a degenerate-clustering diagnostic).
    attch_te = np.unique(idx_te, return_counts=True)[1].max() / len(idx_te)

    purity_train = purity_score(y_train, idx_tr)
    purity_test = purity_score(y_test, idx_te)

    return ami_tr, ami_te, purity_train, purity_test
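

# --- Usage sketch (illustrative, not from the original experiment) -----------
# A minimal driver loop, assuming the HP_* keys are tensorboard.plugins.hparams
# HParam objects and that the data splits (X_train, y_train, X_test, y_test)
# are in scope. Grid values below are placeholders, not the original settings;
# skip the HP_* definitions if they already exist in this module.
import itertools

from tensorboard.plugins.hparams import api as hp

HP_seed = hp.HParam("seed", hp.Discrete([1, 2, 3]))
HP_components = hp.HParam("components", hp.Discrete([10]))
HP_encoder_dims = hp.HParam("encoder_dims", hp.Discrete(["((512, 512), 64)"]))
HP_mixture_dims = hp.HParam("mixture_dims", hp.Discrete(["((512, 512), 64)"]))
HP_bn = hp.HParam("bn", hp.Discrete(["none", "before", "after"]))
HP_connected_weights = hp.HParam("connected_weights", hp.Discrete([True]))
HP_samples = hp.HParam("samples", hp.Discrete([1]))

run = 0
for seed, bn in itertools.product(HP_seed.domain.values, HP_bn.domain.values):
    hparams = {
        HP_seed: seed,
        HP_bn: bn,
        HP_components: 10,
        HP_encoder_dims: "((512, 512), 64)",
        HP_mixture_dims: "((512, 512), 64)",
        HP_connected_weights: True,
        HP_samples: 1,
    }
    run_id = f"run-{run:03d}"
    with tf.summary.create_file_writer(f"logs/hparams/{run_id}").as_default():
        hp.hparams(hparams)  # record the values used in this trial
        ami_tr, ami_te, pur_tr, pur_te = train_test_model(
            run_id, hparams, X_train, y_train, X_test, y_test
        )
        tf.summary.scalar("ami_test", ami_te, step=1)
        tf.summary.scalar("purity_test", pur_te, step=1)
    run += 1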