def test_train_with_gradient_clipping(p_model, q_model):
    """Smoke-test train() with gradient clipping enabled for every criterion."""
    for criterion in (cross_entropy, perplexity, forward_kl, reverse_kl,
                      js_divergence):
        train(p_model, q_model, criterion, epochs=3, clip_gradients=1.)
def test_train(p_model, q_model):
    """Smoke-test train() across all supported divergence criteria."""
    for criterion in (cross_entropy, perplexity, forward_kl, reverse_kl,
                      js_divergence):
        train(p_model, q_model, criterion, epochs=3)
def test_gan_train(gan):
    """End-to-end GAN smoke test: build mixtures, train briefly, classify.

    The mixture shapes depend on gan.n_dims; construction order (q before p
    in the 1-D branch) is preserved from the original in case model
    construction consumes random state.
    """
    if gan.n_dims == 1:
        q_model = MixtureModel(
            [Normal([-0.5], [[1.0]]), Normal([0.5], [[1.0]])],
            [0.5, 0.5])
        p_model = MixtureModel(
            [Normal([2.3], [[2.2]]), Normal([-2.3], [[2.2]])],
            [0.5, 0.5])
    else:
        q_model = MixtureModel(
            [Normal([0., 0.], [1., 0., 0., 1.0]),
             Normal([0., 0.], [1., 0., 0., 1.0])],
            [0.25, 0.75])
        p_model = MixtureModel(
            [Normal([0., 0.], [1., 0., 0., 1.0]),
             Normal([0., 0.], [1., 0., 0., 1.0])],
            [0.25, 0.75])
    train(p_model, q_model, gan, optimizer="RMSprop", epochs=3, lr=1e-3,
          batch_size=512)
    samples = p_model.sample(100)
    gan.classify(samples)
def fit(self, X, variational_dist=None, elbo_kwargs=None, **kwargs):
    """Fit the model to X by maximizing the ELBO.

    Args:
        X: Training data; wrapped in Data(...) before training.
        variational_dist: Variational distribution for the ELBO; defaults
            to PPCA_Variational_V2(self) when omitted.
        elbo_kwargs: Optional dict of extra keyword arguments for ELBO.
            Defaults to None instead of {} — a mutable default dict would
            be shared across all calls (classic Python pitfall).
        **kwargs: Forwarded to train().

    Returns:
        The training statistics object returned by train().
    """
    if variational_dist is None:
        variational_dist = PPCA_Variational_V2(self)
    if elbo_kwargs is None:
        elbo_kwargs = {}
    data = Data(X)
    stats = train(data, self, ELBO(variational_dist, **elbo_kwargs), **kwargs)
    return stats
def fit(self, x, **kwargs):
    """Fit the wrapped model to x with RMSprop; parameter tracking is off.

    Returns the training statistics from train().
    """
    return train(Data(x), self.model, self.criterion, optimizer='RMSprop',
                 track_parameters=False, **kwargs)
def test_plot_stats(p_model, q_model, n_stats):
    """Verify stats bookkeeping during training and exercise plot_stats."""
    stats = train(p_model, q_model, forward_kl, epochs=10,
                  track_parameters=True)
    # One entry per epoch for the loss and for every tracked parameter.
    assert len(stats.data['loss']) == 10
    assert len(stats.data.keys()) == n_stats + 1
    for key in stats.data.keys():
        assert len(stats.data[key]) == 10
    # Pick plotting goals based on the distribution family, if known.
    if isinstance(p_model, Normal):
        goals = [p_model.loc, p_model.scale]
    elif isinstance(p_model, Exponential):
        goals = [p_model.rate]
    elif isinstance(p_model, Beta):
        goals = [p_model.alpha, p_model.beta]
    elif isinstance(p_model, StudentT):
        goals = [p_model.df, p_model.loc, p_model.scale]
    else:
        goals = None
    if goals is None:
        plot_stats(stats)
    else:
        plot_stats(stats, goals=goals)
    plt.close()
def fit(self, x, **kwargs):
    """Fit the wrapped model to x by minimizing cross entropy.

    Returns the training statistics from train().
    """
    dataset = Data(x)
    return train(dataset, self.model, cross_entropy, **kwargs)
def fit(self, x, **kwargs):
    """Fit this model to x using its own training criterion.

    Returns the training statistics from train().
    """
    dataset = Data(x)
    stats = train(dataset, self, self.criterion, **kwargs)
    return stats
def fit(self, R, **kwargs):
    """Fit on the matrix R, flattened into rows of length N * M.

    Returns the training statistics from train().
    """
    flattened = R.view(-1, self.N * self.M)
    return train(Data(flattened), self, cross_entropy, **kwargs)
def test_train_with_penalty(p_model, q_model):
    """Smoke-test train() with L1/L2 penalties, alone and combined."""
    cases = [
        (cross_entropy, {'l1_penalty': 0.}),
        (cross_entropy, {'l2_penalty': 0.}),
        (forward_kl, {'l1_penalty': 0.}),
        (forward_kl, {'l2_penalty': 0.}),
        (cross_entropy, {'l1_penalty': 2., 'l2_penalty': 2.}),
        (forward_kl, {'l1_penalty': 2., 'l2_penalty': 2.}),
    ]
    for criterion, penalties in cases:
        train(p_model, q_model, criterion, epochs=3, **penalties)
def fit(self, x, use_elbo=True, **kwargs):
    """Fit to x, using the model's ELBO criterion or plain cross entropy.

    Args:
        x: Training data; wrapped in Data(...).
        use_elbo: When True (default), train with self.criterion;
            otherwise train with cross_entropy.
        **kwargs: Forwarded to train().

    Returns the training statistics from train().
    """
    dataset = Data(x)
    criterion = self.criterion if use_elbo else cross_entropy
    return train(dataset, self, criterion, **kwargs)