def test_vader_nonrecur(self):
    NUM_OF_TIME_POINTS = 7
    X_train, y_train = generate_x_y_for_nonrecur(NUM_OF_TIME_POINTS, 400)

    # Run VaDER non-recurrently (ordinary VAE with GM prior)
    # noinspection PyTypeChecker
    vader = VADER(X_train=X_train, y_train=y_train, n_hidden=[12, 2], k=2,
                  learning_rate=1e-3, output_activation=None, recurrent=False,
                  batch_size=16)

    # pre-train without latent loss
    vader.pre_fit(n_epoch=10, verbose=True)
    # train with latent loss
    vader.fit(n_epoch=10, verbose=True)

    # get the clusters
    clustering = vader.cluster(X_train)
    assert any(clustering)
    assert len(clustering) == len(X_train)

    # get the reconstructions
    prediction = vader.predict(X_train)
    assert prediction.shape == X_train.shape

    # compute the loss given the network
    loss = vader.get_loss(X_train)
    assert loss
    assert "reconstruction_loss" in loss
    assert "latent_loss" in loss
    assert loss["reconstruction_loss"] >= 0
    assert loss["latent_loss"] >= 0

    # generate some samples
    NUM_OF_GENERATED_SAMPLES = 10
    generated_samples = vader.generate(NUM_OF_GENERATED_SAMPLES)
    assert generated_samples
    assert "clusters" in generated_samples
    assert "samples" in generated_samples
    assert len(generated_samples["clusters"]) == NUM_OF_GENERATED_SAMPLES
    assert generated_samples["samples"].shape == (NUM_OF_GENERATED_SAMPLES, NUM_OF_TIME_POINTS)
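
# A minimal sketch of the data generator assumed by the test above. The real
# generate_x_y_for_nonrecur lives elsewhere in the suite, so the data layout
# here is an assumption: two shuffled Gaussian clusters with
# num_of_time_points features per sample, which is enough to exercise the
# non-recurrent VaDER path and satisfy the shape assertions above.
import numpy as np

def generate_x_y_for_nonrecur(num_of_time_points, num_of_samples):
    half = num_of_samples // 2
    mu = np.zeros(num_of_time_points)
    cov = np.eye(num_of_time_points)
    a1 = np.random.multivariate_normal(mu - 1.0, cov, half)
    a2 = np.random.multivariate_normal(mu + 1.0, cov, half)
    X = np.concatenate((a1, a2), axis=0)
    y = np.repeat([0, 1], half)
    ii = np.random.permutation(2 * half)
    return X[ii, :], y[ii]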
def test2():
    x_train, y_train = get_dete_for_seconed_test()
    vader = VADER(X_train=x_train, y_train=y_train, n_hidden=[12, 2], k=2,
                  learning_rate=1e-3, output_activation=None, recurrent=False,
                  batch_size=16)

    # pre-train without latent loss
    vader.pre_fit(n_epoch=50, verbose=True)
    # train with latent loss
    vader.fit(n_epoch=50, verbose=True)

    # get the clusters
    clustering = vader.cluster(x_train)
    # get the reconstructions
    prediction = vader.predict(x_train)
    # compute the loss given the network
    loss = vader.get_loss(x_train)
    # generate some samples
    generated_samples = vader.generate(10)
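
# A hedged sketch of the helper called by test2. The real
# get_dete_for_seconed_test (name kept as spelled at the call site) is not
# shown in this file, so the data it returns is an assumption: two noisy
# sinusoidal clusters that differ in phase, one row per sample, shuffled.
def get_dete_for_seconed_test(ns=200, nt=7):
    t = np.linspace(0, 2 * np.pi, nt)
    a1 = np.sin(t) + np.random.normal(0, 0.1, (ns, nt))
    a2 = np.cos(t) + np.random.normal(0, 0.1, (ns, nt))
    x_train = np.concatenate((a1, a2), axis=0)
    y_train = np.repeat([0, 1], ns)
    ii = np.random.permutation(ns * 2)
    return x_train[ii, :], y_train[ii]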
def test3():
    # example parameters (assumed values): two well-separated Gaussian
    # clusters in a 7-dimensional feature space
    nt = 7
    ns = 200
    mu1 = np.full(nt, -1.0)
    mu2 = np.full(nt, 1.0)
    sigma = np.eye(nt)

    a1 = np.random.multivariate_normal(mu1, sigma, ns)
    a2 = np.random.multivariate_normal(mu2, sigma, ns)
    X_train = np.concatenate((a1, a2), axis=0)
    y_train = np.repeat([0, 1], ns)
    ii = np.random.permutation(ns * 2)
    X_train = X_train[ii, :]
    y_train = y_train[ii]

    # normalize (better for fitting)
    X_train = (X_train - np.mean(X_train)) / np.std(X_train)

    vader = VADER(X_train=X_train, y_train=y_train, n_hidden=[12, 2], k=2,
                  learning_rate=1e-3, output_activation=None, recurrent=False,
                  batch_size=16)

    # pre-train without latent loss
    vader.pre_fit(n_epoch=50, verbose=True)
    # train with latent loss
    vader.fit(n_epoch=50, verbose=True)

    # get the clusters
    clustering = vader.cluster(X_train)
    # get the reconstructions
    prediction = vader.predict(X_train)
    # compute the loss given the network
    loss = vader.get_loss(X_train)
    # generate some samples
    generated_samples = vader.generate(10)
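
# A hedged sketch of an extra sanity check that any of the tests above could
# apply to VaDER's output: compare the cluster assignments against the labels
# that generated the data. adjusted_rand_score comes from scikit-learn (an
# assumed extra dependency, not required by the tests above); it is 1.0 for a
# perfect match up to relabeling and near 0.0 for random assignments.
def assert_clusters_match_labels(clustering, y_true, min_ari=0.5):
    from sklearn.metrics import adjusted_rand_score
    ari = adjusted_rand_score(y_true, clustering)
    assert ari >= min_ari, "clustering close to chance (ARI=%.2f)" % ari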